
Merge remote-tracking branch 'qatar/master'
[coroid/ffmpeg_saccubus.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
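/* For example, at a 44100 Hz output rate 1024 samples correspond to roughly
   23 ms between SDL audio callbacks, which is also roughly how precisely the
   audio clock below can be estimated. */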
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
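/* Packets demuxed by the read thread are appended to this FIFO and later
   consumed by the audio, video and subtitle decoders; the mutex protects the
   list and the condition variable wakes a blocked reader when a packet is
   queued or an abort is requested. */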
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static int opt_help(const char *opt, const char *arg);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int frame_width = 0;
227 static int frame_height = 0;
228 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int step = 0;
243 static int thread_count = 1;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=-1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261
262 static int rdftspeed=20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static VideoState *cur_stream;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
281 {
282     AVPacketList *pkt1;
283
284     /* duplicate the packet */
285     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
286         return -1;
287
288     pkt1 = av_malloc(sizeof(AVPacketList));
289     if (!pkt1)
290         return -1;
291     pkt1->pkt = *pkt;
292     pkt1->next = NULL;
293
294
295     SDL_LockMutex(q->mutex);
296
297     if (!q->last_pkt)
299         q->first_pkt = pkt1;
300     else
301         q->last_pkt->next = pkt1;
302     q->last_pkt = pkt1;
303     q->nb_packets++;
304     q->size += pkt1->pkt.size + sizeof(*pkt1);
305     /* XXX: should duplicate packet data in DV case */
306     SDL_CondSignal(q->cond);
307
308     SDL_UnlockMutex(q->mutex);
309     return 0;
310 }
311
312 /* packet queue handling */
313 static void packet_queue_init(PacketQueue *q)
314 {
315     memset(q, 0, sizeof(PacketQueue));
316     q->mutex = SDL_CreateMutex();
317     q->cond = SDL_CreateCond();
318     packet_queue_put(q, &flush_pkt);
319 }
320
321 static void packet_queue_flush(PacketQueue *q)
322 {
323     AVPacketList *pkt, *pkt1;
324
325     SDL_LockMutex(q->mutex);
326     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
327         pkt1 = pkt->next;
328         av_free_packet(&pkt->pkt);
329         av_freep(&pkt);
330     }
331     q->last_pkt = NULL;
332     q->first_pkt = NULL;
333     q->nb_packets = 0;
334     q->size = 0;
335     SDL_UnlockMutex(q->mutex);
336 }
337
338 static void packet_queue_end(PacketQueue *q)
339 {
340     packet_queue_flush(q);
341     SDL_DestroyMutex(q->mutex);
342     SDL_DestroyCond(q->cond);
343 }
344
345 static void packet_queue_abort(PacketQueue *q)
346 {
347     SDL_LockMutex(q->mutex);
348
349     q->abort_request = 1;
350
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
357 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
358 {
359     AVPacketList *pkt1;
360     int ret;
361
362     SDL_LockMutex(q->mutex);
363
364     for(;;) {
365         if (q->abort_request) {
366             ret = -1;
367             break;
368         }
369
370         pkt1 = q->first_pkt;
371         if (pkt1) {
372             q->first_pkt = pkt1->next;
373             if (!q->first_pkt)
374                 q->last_pkt = NULL;
375             q->nb_packets--;
376             q->size -= pkt1->pkt.size + sizeof(*pkt1);
377             *pkt = pkt1->pkt;
378             av_free(pkt1);
379             ret = 1;
380             break;
381         } else if (!block) {
382             ret = 0;
383             break;
384         } else {
385             SDL_CondWait(q->cond, q->mutex);
386         }
387     }
388     SDL_UnlockMutex(q->mutex);
389     return ret;
390 }
391
392 static inline void fill_rectangle(SDL_Surface *screen,
393                                   int x, int y, int w, int h, int color)
394 {
395     SDL_Rect rect;
396     rect.x = x;
397     rect.y = y;
398     rect.w = w;
399     rect.h = h;
400     SDL_FillRect(screen, &rect, color);
401 }
402
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
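/* Classic alpha blend: old * (255 - a) + new * a, normalized by 255. The
   shift s allows newp to be the sum of 2^s chroma samples, so subsampled
   chroma can be averaged and blended in a single step. */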
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408     unsigned int v = ((const uint32_t *)(s))[0];\
409     a = (v >> 24) & 0xff;\
410     r = (v >> 16) & 0xff;\
411     g = (v >> 8) & 0xff;\
412     b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
418     a = (val >> 24) & 0xff;\
419     y = (val >> 16) & 0xff;\
420     u = (val >> 8) & 0xff;\
421     v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
429
430 #define BPP 1
431
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434     int wrap, wrap3, width2, skip2;
435     int y, u, v, a, u1, v1, a1, w, h;
436     uint8_t *lum, *cb, *cr;
437     const uint8_t *p;
438     const uint32_t *pal;
439     int dstx, dsty, dstw, dsth;
440
441     dstw = av_clip(rect->w, 0, imgw);
442     dsth = av_clip(rect->h, 0, imgh);
443     dstx = av_clip(rect->x, 0, imgw - dstw);
444     dsty = av_clip(rect->y, 0, imgh - dsth);
445     lum = dst->data[0] + dsty * dst->linesize[0];
446     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
447     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
448
449     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
450     skip2 = dstx >> 1;
451     wrap = dst->linesize[0];
452     wrap3 = rect->pict.linesize[0];
453     p = rect->pict.data[0];
454     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
455
456     if (dsty & 1) {
457         lum += dstx;
458         cb += skip2;
459         cr += skip2;
460
461         if (dstx & 1) {
462             YUVA_IN(y, u, v, a, p, pal);
463             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
465             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
466             cb++;
467             cr++;
468             lum++;
469             p += BPP;
470         }
471         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
472             YUVA_IN(y, u, v, a, p, pal);
473             u1 = u;
474             v1 = v;
475             a1 = a;
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477
478             YUVA_IN(y, u, v, a, p + BPP, pal);
479             u1 += u;
480             v1 += v;
481             a1 += a;
482             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
483             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
484             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
485             cb++;
486             cr++;
487             p += 2 * BPP;
488             lum += 2;
489         }
490         if (w) {
491             YUVA_IN(y, u, v, a, p, pal);
492             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
493             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
494             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
495             p++;
496             lum++;
497         }
498         p += wrap3 - dstw * BPP;
499         lum += wrap - dstw - dstx;
500         cb += dst->linesize[1] - width2 - skip2;
501         cr += dst->linesize[2] - width2 - skip2;
502     }
503     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             u1 = u;
511             v1 = v;
512             a1 = a;
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             p += wrap3;
515             lum += wrap;
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 += u;
518             v1 += v;
519             a1 += a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523             cb++;
524             cr++;
525             p += -wrap3 + BPP;
526             lum += -wrap + 1;
527         }
528         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535             YUVA_IN(y, u, v, a, p + BPP, pal);
536             u1 += u;
537             v1 += v;
538             a1 += a;
539             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540             p += wrap3;
541             lum += wrap;
542
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554
555             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557
558             cb++;
559             cr++;
560             p += -wrap3 + 2 * BPP;
561             lum += -wrap + 2;
562         }
563         if (w) {
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 = u;
566             v1 = v;
567             a1 = a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             p += wrap3;
570             lum += wrap;
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 += u;
573             v1 += v;
574             a1 += a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578             cb++;
579             cr++;
580             p += -wrap3 + BPP;
581             lum += -wrap + 1;
582         }
583         p += wrap3 + (wrap3 - dstw * BPP);
584         lum += wrap + (wrap - dstw - dstx);
585         cb += dst->linesize[1] - width2 - skip2;
586         cr += dst->linesize[2] - width2 - skip2;
587     }
588     /* handle odd height */
589     if (h) {
590         lum += dstx;
591         cb += skip2;
592         cr += skip2;
593
594         if (dstx & 1) {
595             YUVA_IN(y, u, v, a, p, pal);
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599             cb++;
600             cr++;
601             lum++;
602             p += BPP;
603         }
604         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605             YUVA_IN(y, u, v, a, p, pal);
606             u1 = u;
607             v1 = v;
608             a1 = a;
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610
611             YUVA_IN(y, u, v, a, p + BPP, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618             cb++;
619             cr++;
620             p += 2 * BPP;
621             lum += 2;
622         }
623         if (w) {
624             YUVA_IN(y, u, v, a, p, pal);
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628         }
629     }
630 }
631
632 static void free_subpicture(SubPicture *sp)
633 {
634     avsubtitle_free(&sp->sub);
635 }
636
637 static void video_image_display(VideoState *is)
638 {
639     VideoPicture *vp;
640     SubPicture *sp;
641     AVPicture pict;
642     float aspect_ratio;
643     int width, height, x, y;
644     SDL_Rect rect;
645     int i;
646
647     vp = &is->pictq[is->pictq_rindex];
648     if (vp->bmp) {
649 #if CONFIG_AVFILTER
650          if (vp->picref->video->sample_aspect_ratio.num == 0)
651              aspect_ratio = 0;
652          else
653              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
654 #else
655
656         /* XXX: use variable in the frame */
657         if (is->video_st->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
659         else if (is->video_st->codec->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
661         else
662             aspect_ratio = 0;
663 #endif
664         if (aspect_ratio <= 0.0)
665             aspect_ratio = 1.0;
666         aspect_ratio *= (float)vp->width / (float)vp->height;
667
668         if (is->subtitle_st) {
669             if (is->subpq_size > 0) {
670                 sp = &is->subpq[is->subpq_rindex];
671
672                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
673                     SDL_LockYUVOverlay (vp->bmp);
674
675                     pict.data[0] = vp->bmp->pixels[0];
676                     pict.data[1] = vp->bmp->pixels[2];
677                     pict.data[2] = vp->bmp->pixels[1];
678
679                     pict.linesize[0] = vp->bmp->pitches[0];
680                     pict.linesize[1] = vp->bmp->pitches[2];
681                     pict.linesize[2] = vp->bmp->pitches[1];
682
683                     for (i = 0; i < sp->sub.num_rects; i++)
684                         blend_subrect(&pict, sp->sub.rects[i],
685                                       vp->bmp->w, vp->bmp->h);
686
687                     SDL_UnlockYUVOverlay (vp->bmp);
688                 }
689             }
690         }
691
692
693         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
694         height = is->height;
695         width = ((int)rint(height * aspect_ratio)) & ~1;
696         if (width > is->width) {
697             width = is->width;
698             height = ((int)rint(width / aspect_ratio)) & ~1;
699         }
700         x = (is->width - width) / 2;
701         y = (is->height - height) / 2;
702         is->no_background = 0;
703         rect.x = is->xleft + x;
704         rect.y = is->ytop  + y;
705         rect.w = FFMAX(width,  1);
706         rect.h = FFMAX(height, 1);
707         SDL_DisplayYUVOverlay(vp->bmp, &rect);
708     }
709 }
710
711 /* get the current audio output buffer size, in bytes. With SDL, we
712    cannot have precise information */
713 static int audio_write_get_buf_size(VideoState *is)
714 {
715     return is->audio_buf_size - is->audio_buf_index;
716 }
717
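/* wrap a possibly negative index into [0, b): e.g. compute_mod(-3, b) == b - 3;
   used to walk the circular sample_array below */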
718 static inline int compute_mod(int a, int b)
719 {
720     return a < 0 ? a%b + b : a%b;
721 }
722
723 static void video_audio_display(VideoState *s)
724 {
725     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
726     int ch, channels, h, h2, bgcolor, fgcolor;
727     int64_t time_diff;
728     int rdft_bits, nb_freq;
729
730     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
731         ;
732     nb_freq= 1<<(rdft_bits-1);
733
734     /* compute display index : center on currently output samples */
735     channels = s->audio_st->codec->channels;
736     nb_display_channels = channels;
737     if (!s->paused) {
738         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
739         n = 2 * channels;
740         delay = audio_write_get_buf_size(s);
741         delay /= n;
742
743         /* to be more precise, we take into account the time spent since
744            the last buffer computation */
745         if (audio_callback_time) {
746             time_diff = av_gettime() - audio_callback_time;
747             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
748         }
749
750         delay += 2*data_used;
751         if (delay < data_used)
752             delay = data_used;
753
754         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
755         if (s->show_mode == SHOW_MODE_WAVES) {
756             h= INT_MIN;
757             for(i=0; i<1000; i+=channels){
758                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
759                 int a= s->sample_array[idx];
760                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
761                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
762                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
763                 int score= a-d;
764                 if(h<score && (b^c)<0){
765                     h= score;
766                     i_start= idx;
767                 }
768             }
769         }
770
771         s->last_i_start = i_start;
772     } else {
773         i_start = s->last_i_start;
774     }
775
776     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
777     if (s->show_mode == SHOW_MODE_WAVES) {
778         fill_rectangle(screen,
779                        s->xleft, s->ytop, s->width, s->height,
780                        bgcolor);
781
782         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
783
784         /* total height for one channel */
785         h = s->height / nb_display_channels;
786         /* graph height / 2 */
787         h2 = (h * 9) / 20;
788         for(ch = 0;ch < nb_display_channels; ch++) {
789             i = i_start + ch;
790             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
791             for(x = 0; x < s->width; x++) {
792                 y = (s->sample_array[i] * h2) >> 15;
793                 if (y < 0) {
794                     y = -y;
795                     ys = y1 - y;
796                 } else {
797                     ys = y1;
798                 }
799                 fill_rectangle(screen,
800                                s->xleft + x, ys, 1, y,
801                                fgcolor);
802                 i += channels;
803                 if (i >= SAMPLE_ARRAY_SIZE)
804                     i -= SAMPLE_ARRAY_SIZE;
805             }
806         }
807
808         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
809
810         for(ch = 1;ch < nb_display_channels; ch++) {
811             y = s->ytop + ch * h;
812             fill_rectangle(screen,
813                            s->xleft, y, s->width, 1,
814                            fgcolor);
815         }
816         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
817     }else{
818         nb_display_channels= FFMIN(nb_display_channels, 2);
819         if(rdft_bits != s->rdft_bits){
820             av_rdft_end(s->rdft);
821             av_free(s->rdft_data);
822             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
823             s->rdft_bits= rdft_bits;
824             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
825         }
826         {
827             FFTSample *data[2];
828             for(ch = 0;ch < nb_display_channels; ch++) {
829                 data[ch] = s->rdft_data + 2*nb_freq*ch;
830                 i = i_start + ch;
831                 for(x = 0; x < 2*nb_freq; x++) {
832                     double w= (x-nb_freq)*(1.0/nb_freq);
833                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
834                     i += channels;
835                     if (i >= SAMPLE_ARRAY_SIZE)
836                         i -= SAMPLE_ARRAY_SIZE;
837                 }
838                 av_rdft_calc(s->rdft, data[ch]);
839             }
840             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
841             for(y=0; y<s->height; y++){
842                 double w= 1/sqrt(nb_freq);
843                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
844                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
845                        + data[1][2*y+1]*data[1][2*y+1])) : a;
846                 a= FFMIN(a,255);
847                 b= FFMIN(b,255);
848                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
849
850                 fill_rectangle(screen,
851                             s->xpos, s->height-y, 1, 1,
852                             fgcolor);
853             }
854         }
855         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
856         s->xpos++;
857         if(s->xpos >= s->width)
858             s->xpos= s->xleft;
859     }
860 }
861
862 static void stream_close(VideoState *is)
863 {
864     VideoPicture *vp;
865     int i;
866     /* XXX: use a special url_shutdown call to abort parse cleanly */
867     is->abort_request = 1;
868     SDL_WaitThread(is->read_tid, NULL);
869     SDL_WaitThread(is->refresh_tid, NULL);
870
871     /* free all pictures */
872     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
873         vp = &is->pictq[i];
874 #if CONFIG_AVFILTER
875         if (vp->picref) {
876             avfilter_unref_buffer(vp->picref);
877             vp->picref = NULL;
878         }
879 #endif
880         if (vp->bmp) {
881             SDL_FreeYUVOverlay(vp->bmp);
882             vp->bmp = NULL;
883         }
884     }
885     SDL_DestroyMutex(is->pictq_mutex);
886     SDL_DestroyCond(is->pictq_cond);
887     SDL_DestroyMutex(is->subpq_mutex);
888     SDL_DestroyCond(is->subpq_cond);
889 #if !CONFIG_AVFILTER
890     if (is->img_convert_ctx)
891         sws_freeContext(is->img_convert_ctx);
892 #endif
893     av_free(is);
894 }
895
896 static void do_exit(void)
897 {
898     if (cur_stream) {
899         stream_close(cur_stream);
900         cur_stream = NULL;
901     }
902     uninit_opts();
903 #if CONFIG_AVFILTER
904     avfilter_uninit();
905 #endif
906     if (show_status)
907         printf("\n");
908     SDL_Quit();
909     av_log(NULL, AV_LOG_QUIET, "%s", "");
910     exit(0);
911 }
912
913 static int video_open(VideoState *is){
914     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
915     int w,h;
916
917     if(is_full_screen) flags |= SDL_FULLSCREEN;
918     else               flags |= SDL_RESIZABLE;
919
920     if (is_full_screen && fs_screen_width) {
921         w = fs_screen_width;
922         h = fs_screen_height;
923     } else if(!is_full_screen && screen_width){
924         w = screen_width;
925         h = screen_height;
926 #if CONFIG_AVFILTER
927     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
928         w = is->out_video_filter->inputs[0]->w;
929         h = is->out_video_filter->inputs[0]->h;
930 #else
931     }else if (is->video_st && is->video_st->codec->width){
932         w = is->video_st->codec->width;
933         h = is->video_st->codec->height;
934 #endif
935     } else {
936         w = 640;
937         h = 480;
938     }
939     if(screen && is->width == screen->w && screen->w == w
940        && is->height== screen->h && screen->h == h)
941         return 0;
942
943 #ifndef __APPLE__
944     screen = SDL_SetVideoMode(w, h, 0, flags);
945 #else
946     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
947     screen = SDL_SetVideoMode(w, h, 24, flags);
948 #endif
949     if (!screen) {
950         fprintf(stderr, "SDL: could not set video mode - exiting\n");
951         do_exit();
952     }
953     if (!window_title)
954         window_title = input_filename;
955     SDL_WM_SetCaption(window_title, window_title);
956
957     is->width = screen->w;
958     is->height = screen->h;
959
960     return 0;
961 }
962
963 /* display the current picture, if any */
964 static void video_display(VideoState *is)
965 {
966     if(!screen)
967         video_open(cur_stream);
968     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
969         video_audio_display(is);
970     else if (is->video_st)
971         video_image_display(is);
972 }
973
974 static int refresh_thread(void *opaque)
975 {
976     VideoState *is= opaque;
977     while(!is->abort_request){
978         SDL_Event event;
979         event.type = FF_REFRESH_EVENT;
980         event.user.data1 = opaque;
981         if(!is->refresh){
982             is->refresh=1;
983             SDL_PushEvent(&event);
984         }
985         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
986         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
987     }
988     return 0;
989 }
990
991 /* get the current audio clock value */
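/* The returned value is the pts of the last decoded audio data minus the time
   represented by the bytes still waiting in audio_buf: e.g. with 48 kHz stereo
   S16 audio, bytes_per_sec = 48000 * 2 * 2 = 192000, so 8192 unplayed bytes
   pull the clock back by about 43 ms. The hard-coded 2 below assumes 16-bit
   output samples. */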
992 static double get_audio_clock(VideoState *is)
993 {
994     double pts;
995     int hw_buf_size, bytes_per_sec;
996     pts = is->audio_clock;
997     hw_buf_size = audio_write_get_buf_size(is);
998     bytes_per_sec = 0;
999     if (is->audio_st) {
1000         bytes_per_sec = is->audio_st->codec->sample_rate *
1001             2 * is->audio_st->codec->channels;
1002     }
1003     if (bytes_per_sec)
1004         pts -= (double)hw_buf_size / bytes_per_sec;
1005     return pts;
1006 }
1007
1008 /* get the current video clock value */
1009 static double get_video_clock(VideoState *is)
1010 {
1011     if (is->paused) {
1012         return is->video_current_pts;
1013     } else {
1014         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1015     }
1016 }
1017
1018 /* get the current external clock value */
1019 static double get_external_clock(VideoState *is)
1020 {
1021     int64_t ti;
1022     ti = av_gettime();
1023     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1024 }
1025
1026 /* get the current master clock value */
1027 static double get_master_clock(VideoState *is)
1028 {
1029     double val;
1030
1031     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1032         if (is->video_st)
1033             val = get_video_clock(is);
1034         else
1035             val = get_audio_clock(is);
1036     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1037         if (is->audio_st)
1038             val = get_audio_clock(is);
1039         else
1040             val = get_video_clock(is);
1041     } else {
1042         val = get_external_clock(is);
1043     }
1044     return val;
1045 }
1046
1047 /* seek in the stream */
1048 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1049 {
1050     if (!is->seek_req) {
1051         is->seek_pos = pos;
1052         is->seek_rel = rel;
1053         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1054         if (seek_by_bytes)
1055             is->seek_flags |= AVSEEK_FLAG_BYTE;
1056         is->seek_req = 1;
1057     }
1058 }
1059
1060 /* pause or resume the video */
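/* When resuming, frame_timer is pushed forward by the wall-clock time spent
   paused (av_gettime()/1e6 + video_current_pts_drift - video_current_pts), so
   queued frames are not considered hopelessly late, and the pts drift is then
   re-anchored to the current time. */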
1061 static void stream_toggle_pause(VideoState *is)
1062 {
1063     if (is->paused) {
1064         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1065         if(is->read_pause_return != AVERROR(ENOSYS)){
1066             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1067         }
1068         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1069     }
1070     is->paused = !is->paused;
1071 }
1072
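/* Compute the ideal display time of the given frame. Example with a 25 fps
   stream synced to the audio clock: the nominal delay is 0.040 s; if the video
   clock lags the master clock by more than the sync threshold the delay
   collapses to 0 so the frame is scheduled immediately, and if it runs ahead
   the delay is doubled to let the master catch up. frame_timer accumulates
   these delays. */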
1073 static double compute_target_time(double frame_current_pts, VideoState *is)
1074 {
1075     double delay, sync_threshold, diff;
1076
1077     /* compute nominal delay */
1078     delay = frame_current_pts - is->frame_last_pts;
1079     if (delay <= 0 || delay >= 10.0) {
1080         /* if incorrect delay, use previous one */
1081         delay = is->frame_last_delay;
1082     } else {
1083         is->frame_last_delay = delay;
1084     }
1085     is->frame_last_pts = frame_current_pts;
1086
1087     /* update delay to follow master synchronisation source */
1088     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1089          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1090         /* if video is slave, we try to correct big delays by
1091            duplicating or deleting a frame */
1092         diff = get_video_clock(is) - get_master_clock(is);
1093
1094         /* skip or repeat frame. We take into account the
1095            delay to compute the threshold. I still don't know
1096            if it is the best guess */
1097         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1098         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1099             if (diff <= -sync_threshold)
1100                 delay = 0;
1101             else if (diff >= sync_threshold)
1102                 delay = 2 * delay;
1103         }
1104     }
1105     is->frame_timer += delay;
1106
1107     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1108             delay, frame_current_pts, -diff);
1109
1110     return is->frame_timer;
1111 }
1112
1113 /* called to display each frame */
1114 static void video_refresh(void *opaque)
1115 {
1116     VideoState *is = opaque;
1117     VideoPicture *vp;
1118
1119     SubPicture *sp, *sp2;
1120
1121     if (is->video_st) {
1122 retry:
1123         if (is->pictq_size == 0) {
1124             // nothing to do, no picture to display in the queue
1125         } else {
1126             double time= av_gettime()/1000000.0;
1127             double next_target;
1128             /* dequeue the picture */
1129             vp = &is->pictq[is->pictq_rindex];
1130
1131             if(time < vp->target_clock)
1132                 return;
1133             /* update current video pts */
1134             is->video_current_pts = vp->pts;
1135             is->video_current_pts_drift = is->video_current_pts - time;
1136             is->video_current_pos = vp->pos;
1137             if(is->pictq_size > 1){
1138                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1139                 assert(nextvp->target_clock >= vp->target_clock);
1140                 next_target= nextvp->target_clock;
1141             }else{
1142                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1143             }
1144             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1145                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1146                 if(is->pictq_size > 1 || time > next_target + 0.5){
1147                     /* update queue size and signal for next picture */
1148                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1149                         is->pictq_rindex = 0;
1150
1151                     SDL_LockMutex(is->pictq_mutex);
1152                     is->pictq_size--;
1153                     SDL_CondSignal(is->pictq_cond);
1154                     SDL_UnlockMutex(is->pictq_mutex);
1155                     goto retry;
1156                 }
1157             }
1158
1159             if(is->subtitle_st) {
1160                 if (is->subtitle_stream_changed) {
1161                     SDL_LockMutex(is->subpq_mutex);
1162
1163                     while (is->subpq_size) {
1164                         free_subpicture(&is->subpq[is->subpq_rindex]);
1165
1166                         /* update queue size and signal for next picture */
1167                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1168                             is->subpq_rindex = 0;
1169
1170                         is->subpq_size--;
1171                     }
1172                     is->subtitle_stream_changed = 0;
1173
1174                     SDL_CondSignal(is->subpq_cond);
1175                     SDL_UnlockMutex(is->subpq_mutex);
1176                 } else {
1177                     if (is->subpq_size > 0) {
1178                         sp = &is->subpq[is->subpq_rindex];
1179
1180                         if (is->subpq_size > 1)
1181                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1182                         else
1183                             sp2 = NULL;
1184
1185                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1186                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1187                         {
1188                             free_subpicture(sp);
1189
1190                             /* update queue size and signal for next picture */
1191                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1192                                 is->subpq_rindex = 0;
1193
1194                             SDL_LockMutex(is->subpq_mutex);
1195                             is->subpq_size--;
1196                             SDL_CondSignal(is->subpq_cond);
1197                             SDL_UnlockMutex(is->subpq_mutex);
1198                         }
1199                     }
1200                 }
1201             }
1202
1203             /* display picture */
1204             if (!display_disable)
1205                 video_display(is);
1206
1207             /* update queue size and signal for next picture */
1208             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1209                 is->pictq_rindex = 0;
1210
1211             SDL_LockMutex(is->pictq_mutex);
1212             is->pictq_size--;
1213             SDL_CondSignal(is->pictq_cond);
1214             SDL_UnlockMutex(is->pictq_mutex);
1215         }
1216     } else if (is->audio_st) {
1217         /* draw the next audio frame */
1218
1219         /* if there is only an audio stream, then display the audio bars (better
1220            than nothing, just to test the implementation) */
1221
1222         /* display picture */
1223         if (!display_disable)
1224             video_display(is);
1225     }
1226     if (show_status) {
1227         static int64_t last_time;
1228         int64_t cur_time;
1229         int aqsize, vqsize, sqsize;
1230         double av_diff;
1231
1232         cur_time = av_gettime();
1233         if (!last_time || (cur_time - last_time) >= 30000) {
1234             aqsize = 0;
1235             vqsize = 0;
1236             sqsize = 0;
1237             if (is->audio_st)
1238                 aqsize = is->audioq.size;
1239             if (is->video_st)
1240                 vqsize = is->videoq.size;
1241             if (is->subtitle_st)
1242                 sqsize = is->subtitleq.size;
1243             av_diff = 0;
1244             if (is->audio_st && is->video_st)
1245                 av_diff = get_audio_clock(is) - get_video_clock(is);
1246             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1247                    get_master_clock(is),
1248                    av_diff,
1249                    FFMAX(is->skip_frames-1, 0),
1250                    aqsize / 1024,
1251                    vqsize / 1024,
1252                    sqsize,
1253                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1254                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1255             fflush(stdout);
1256             last_time = cur_time;
1257         }
1258     }
1259 }
1260
1261 /* allocate a picture (this needs to be done in the main thread to avoid
1262    potential locking problems) */
1263 static void alloc_picture(void *opaque)
1264 {
1265     VideoState *is = opaque;
1266     VideoPicture *vp;
1267
1268     vp = &is->pictq[is->pictq_windex];
1269
1270     if (vp->bmp)
1271         SDL_FreeYUVOverlay(vp->bmp);
1272
1273 #if CONFIG_AVFILTER
1274     if (vp->picref)
1275         avfilter_unref_buffer(vp->picref);
1276     vp->picref = NULL;
1277
1278     vp->width   = is->out_video_filter->inputs[0]->w;
1279     vp->height  = is->out_video_filter->inputs[0]->h;
1280     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1281 #else
1282     vp->width   = is->video_st->codec->width;
1283     vp->height  = is->video_st->codec->height;
1284     vp->pix_fmt = is->video_st->codec->pix_fmt;
1285 #endif
1286
1287     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1288                                    SDL_YV12_OVERLAY,
1289                                    screen);
1290     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1291         /* SDL allocates a buffer smaller than requested if the video
1292          * overlay hardware is unable to support the requested size. */
1293         fprintf(stderr, "Error: the video system does not support an image\n"
1294                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1295                         "to reduce the image size.\n", vp->width, vp->height );
1296         do_exit();
1297     }
1298
1299     SDL_LockMutex(is->pictq_mutex);
1300     vp->allocated = 1;
1301     SDL_CondSignal(is->pictq_cond);
1302     SDL_UnlockMutex(is->pictq_mutex);
1303 }
1304
1305 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1306 {
1307     VideoPicture *vp;
1308     double frame_delay, pts = pts1;
1309
1310     /* compute the exact PTS for the picture if it is omitted in the stream
1311      * pts1 is the dts of the pkt / pts of the frame */
1312     if (pts != 0) {
1313         /* update video clock with pts, if present */
1314         is->video_clock = pts;
1315     } else {
1316         pts = is->video_clock;
1317     }
1318     /* update video clock for next frame */
1319     frame_delay = av_q2d(is->video_st->codec->time_base);
1320     /* for MPEG2, the frame can be repeated, so we update the
1321        clock accordingly */
1322     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1323     is->video_clock += frame_delay;
1324
1325 #if defined(DEBUG_SYNC) && 0
1326     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1327            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1328 #endif
1329
1330     /* wait until we have space to put a new picture */
1331     SDL_LockMutex(is->pictq_mutex);
1332
1333     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1334         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1335
1336     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1337            !is->videoq.abort_request) {
1338         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1339     }
1340     SDL_UnlockMutex(is->pictq_mutex);
1341
1342     if (is->videoq.abort_request)
1343         return -1;
1344
1345     vp = &is->pictq[is->pictq_windex];
1346
1347     /* alloc or resize hardware picture buffer */
1348     if (!vp->bmp ||
1349 #if CONFIG_AVFILTER
1350         vp->width  != is->out_video_filter->inputs[0]->w ||
1351         vp->height != is->out_video_filter->inputs[0]->h) {
1352 #else
1353         vp->width != is->video_st->codec->width ||
1354         vp->height != is->video_st->codec->height) {
1355 #endif
1356         SDL_Event event;
1357
1358         vp->allocated = 0;
1359
1360         /* the allocation must be done in the main thread to avoid
1361            locking problems */
1362         event.type = FF_ALLOC_EVENT;
1363         event.user.data1 = is;
1364         SDL_PushEvent(&event);
1365
1366         /* wait until the picture is allocated */
1367         SDL_LockMutex(is->pictq_mutex);
1368         while (!vp->allocated && !is->videoq.abort_request) {
1369             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370         }
1371         SDL_UnlockMutex(is->pictq_mutex);
1372
1373         if (is->videoq.abort_request)
1374             return -1;
1375     }
1376
1377     /* if the frame is not skipped, then display it */
1378     if (vp->bmp) {
1379         AVPicture pict;
1380 #if CONFIG_AVFILTER
1381         if(vp->picref)
1382             avfilter_unref_buffer(vp->picref);
1383         vp->picref = src_frame->opaque;
1384 #endif
1385
1386         /* get a pointer to the bitmap */
1387         SDL_LockYUVOverlay (vp->bmp);
1388
1389         memset(&pict,0,sizeof(AVPicture));
1390         pict.data[0] = vp->bmp->pixels[0];
1391         pict.data[1] = vp->bmp->pixels[2];
1392         pict.data[2] = vp->bmp->pixels[1];
1393
1394         pict.linesize[0] = vp->bmp->pitches[0];
1395         pict.linesize[1] = vp->bmp->pitches[2];
1396         pict.linesize[2] = vp->bmp->pitches[1];
1397
1398 #if CONFIG_AVFILTER
1399         //FIXME use direct rendering
1400         av_picture_copy(&pict, (AVPicture *)src_frame,
1401                         vp->pix_fmt, vp->width, vp->height);
1402 #else
1403         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1404         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1405             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1406             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1407         if (is->img_convert_ctx == NULL) {
1408             fprintf(stderr, "Cannot initialize the conversion context\n");
1409             exit(1);
1410         }
1411         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1412                   0, vp->height, pict.data, pict.linesize);
1413 #endif
1414         /* update the bitmap content */
1415         SDL_UnlockYUVOverlay(vp->bmp);
1416
1417         vp->pts = pts;
1418         vp->pos = pos;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         vp->target_clock= compute_target_time(vp->pts, is);
1425
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
1432 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1433 {
1434     int len1 av_unused, got_picture, i;
1435
1436     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1437         return -1;
1438
1439     if (pkt->data == flush_pkt.data) {
1440         avcodec_flush_buffers(is->video_st->codec);
1441
1442         SDL_LockMutex(is->pictq_mutex);
1443         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1444         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1445             is->pictq[i].target_clock= 0;
1446         }
1447         while (is->pictq_size && !is->videoq.abort_request) {
1448             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1449         }
1450         is->video_current_pos = -1;
1451         SDL_UnlockMutex(is->pictq_mutex);
1452
1453         is->frame_last_pts = AV_NOPTS_VALUE;
1454         is->frame_last_delay = 0;
1455         is->frame_timer = (double)av_gettime() / 1000000.0;
1456         is->skip_frames = 1;
1457         is->skip_frames_index = 0;
1458         return 0;
1459     }
1460
1461     len1 = avcodec_decode_video2(is->video_st->codec,
1462                                  frame, &got_picture,
1463                                  pkt);
1464
1465     if (got_picture) {
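        /* choose which timestamp to trust: by default libavcodec's best-effort
           estimate, otherwise the packet pts or dts depending on
           decoder_reorder_pts */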
1466         if (decoder_reorder_pts == -1) {
1467             *pts = frame->best_effort_timestamp;
1468         } else if (decoder_reorder_pts) {
1469             *pts = frame->pkt_pts;
1470         } else {
1471             *pts = frame->pkt_dts;
1472         }
1473
1474         if (*pts == AV_NOPTS_VALUE) {
1475             *pts = 0;
1476         }
1477
1478         is->skip_frames_index += 1;
1479         if(is->skip_frames_index >= is->skip_frames){
1480             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1481             return 1;
1482         }
1483
1484     }
1485     return 0;
1486 }
1487
1488 #if CONFIG_AVFILTER
1489 typedef struct {
1490     VideoState *is;
1491     AVFrame *frame;
1492     int use_dr1;
1493 } FilterPriv;
1494
1495 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1496 {
1497     AVFilterContext *ctx = codec->opaque;
1498     AVFilterBufferRef  *ref;
1499     int perms = AV_PERM_WRITE;
1500     int i, w, h, stride[4];
1501     unsigned edge;
1502     int pixel_size;
1503
1504     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1505
1506     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1507         perms |= AV_PERM_NEG_LINESIZES;
1508
1509     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1511         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1512         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1513     }
1514     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1515
1516     w = codec->width;
1517     h = codec->height;
1518
1519     if(av_image_check_size(w, h, 0, codec))
1520         return -1;
1521
1522     avcodec_align_dimensions2(codec, &w, &h, stride);
1523     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1524     w += edge << 1;
1525     h += edge << 1;
1526
1527     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1528         return -1;
1529
1530     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1531     ref->video->w = codec->width;
1532     ref->video->h = codec->height;
1533     for(i = 0; i < 4; i ++) {
1534         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1535         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1536
1537         if (ref->data[i]) {
1538             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1539         }
1540         pic->data[i]     = ref->data[i];
1541         pic->linesize[i] = ref->linesize[i];
1542     }
1543     pic->opaque = ref;
1544     pic->age    = INT_MAX;
1545     pic->type   = FF_BUFFER_TYPE_USER;
1546     pic->reordered_opaque = codec->reordered_opaque;
1547     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1548     else           pic->pkt_pts = AV_NOPTS_VALUE;
1549     return 0;
1550 }
1551
1552 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1553 {
1554     memset(pic->data, 0, sizeof(pic->data));
1555     avfilter_unref_buffer(pic->opaque);
1556 }
1557
1558 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1559 {
1560     AVFilterBufferRef *ref = pic->opaque;
1561
1562     if (pic->data[0] == NULL) {
1563         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1564         return codec->get_buffer(codec, pic);
1565     }
1566
1567     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1568         (codec->pix_fmt != ref->format)) {
1569         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1570         return -1;
1571     }
1572
1573     pic->reordered_opaque = codec->reordered_opaque;
1574     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1575     else           pic->pkt_pts = AV_NOPTS_VALUE;
1576     return 0;
1577 }
1578
1579 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1580 {
1581     FilterPriv *priv = ctx->priv;
1582     AVCodecContext *codec;
1583     if(!opaque) return -1;
1584
1585     priv->is = opaque;
1586     codec    = priv->is->video_st->codec;
1587     codec->opaque = ctx;
1588     if((codec->codec->capabilities & CODEC_CAP_DR1)
1589     ) {
1590         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1591         priv->use_dr1 = 1;
1592         codec->get_buffer     = input_get_buffer;
1593         codec->release_buffer = input_release_buffer;
1594         codec->reget_buffer   = input_reget_buffer;
1595         codec->thread_safe_callbacks = 1;
1596     }
1597
1598     priv->frame = avcodec_alloc_frame();
1599
1600     return 0;
1601 }
1602
1603 static void input_uninit(AVFilterContext *ctx)
1604 {
1605     FilterPriv *priv = ctx->priv;
1606     av_free(priv->frame);
1607 }
1608
1609 static int input_request_frame(AVFilterLink *link)
1610 {
1611     FilterPriv *priv = link->src->priv;
1612     AVFilterBufferRef *picref;
1613     int64_t pts = 0;
1614     AVPacket pkt;
1615     int ret;
1616
1617     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1618         av_free_packet(&pkt);
1619     if (ret < 0)
1620         return -1;
1621
1622     if(priv->use_dr1 && priv->frame->opaque) {
1623         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1624     } else {
1625         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1626         av_image_copy(picref->data, picref->linesize,
1627                       priv->frame->data, priv->frame->linesize,
1628                       picref->format, link->w, link->h);
1629     }
1630     av_free_packet(&pkt);
1631
1632     avfilter_copy_frame_props(picref, priv->frame);
1633     picref->pts = pts;
1634
1635     avfilter_start_frame(link, picref);
1636     avfilter_draw_slice(link, 0, link->h, 1);
1637     avfilter_end_frame(link);
1638
1639     return 0;
1640 }
1641
1642 static int input_query_formats(AVFilterContext *ctx)
1643 {
1644     FilterPriv *priv = ctx->priv;
1645     enum PixelFormat pix_fmts[] = {
1646         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1647     };
1648
1649     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1650     return 0;
1651 }
1652
1653 static int input_config_props(AVFilterLink *link)
1654 {
1655     FilterPriv *priv  = link->src->priv;
1656     AVCodecContext *c = priv->is->video_st->codec;
1657
1658     link->w = c->width;
1659     link->h = c->height;
1660     link->time_base = priv->is->video_st->time_base;
1661
1662     return 0;
1663 }
1664
1665 static AVFilter input_filter =
1666 {
1667     .name      = "ffplay_input",
1668
1669     .priv_size = sizeof(FilterPriv),
1670
1671     .init      = input_init,
1672     .uninit    = input_uninit,
1673
1674     .query_formats = input_query_formats,
1675
1676     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1677     .outputs   = (AVFilterPad[]) {{ .name = "default",
1678                                     .type = AVMEDIA_TYPE_VIDEO,
1679                                     .request_frame = input_request_frame,
1680                                     .config_props  = input_config_props, },
1681                                   { .name = NULL }},
1682 };
1683
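/* Build the video filter graph: the "ffplay_input" source feeds either the
   user-supplied filter chain from -vf or a direct link into a buffersink
   restricted to PIX_FMT_YUV420P; the sink is saved in is->out_video_filter
   for video_thread() to pull frames from. */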
1684 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1685 {
1686     char sws_flags_str[128];
1687     int ret;
1688     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1689     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1690     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1691     graph->scale_sws_opts = av_strdup(sws_flags_str);
1692
1693     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1694                                             NULL, is, graph)) < 0)
1695         goto the_end;
1696     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1697                                             NULL, pix_fmts, graph)) < 0)
1698         goto the_end;
1699
1700     if(vfilters) {
1701         AVFilterInOut *outputs = avfilter_inout_alloc();
1702         AVFilterInOut *inputs  = avfilter_inout_alloc();
1703
1704         outputs->name    = av_strdup("in");
1705         outputs->filter_ctx = filt_src;
1706         outputs->pad_idx = 0;
1707         outputs->next    = NULL;
1708
1709         inputs->name    = av_strdup("out");
1710         inputs->filter_ctx = filt_out;
1711         inputs->pad_idx = 0;
1712         inputs->next    = NULL;
1713
1714         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1715             goto the_end;
1716         av_freep(&vfilters);
1717     } else {
1718         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1719             goto the_end;
1720     }
1721
1722     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1723         goto the_end;
1724
1725     is->out_video_filter = filt_out;
1726 the_end:
1727     return ret;
1728 }
1729
1730 #endif  /* CONFIG_AVFILTER */
1731
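/* Video decoding thread: pull decoded frames (from the filter graph when
   compiled with libavfilter, directly from the decoder otherwise), rescale
   their pts into the stream time base and hand them to queue_picture() for
   display by the refresh code. */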
1732 static int video_thread(void *arg)
1733 {
1734     VideoState *is = arg;
1735     AVFrame *frame= avcodec_alloc_frame();
1736     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1737     double pts;
1738     int ret;
1739
1740 #if CONFIG_AVFILTER
1741     AVFilterGraph *graph = avfilter_graph_alloc();
1742     AVFilterContext *filt_out = NULL;
1743
1744     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1745         goto the_end;
1746     filt_out = is->out_video_filter;
1747 #endif
1748
1749     for(;;) {
1750 #if !CONFIG_AVFILTER
1751         AVPacket pkt;
1752 #else
1753         AVFilterBufferRef *picref;
1754         AVRational tb = filt_out->inputs[0]->time_base;
1755 #endif
1756         while (is->paused && !is->videoq.abort_request)
1757             SDL_Delay(10);
1758 #if CONFIG_AVFILTER
1759         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1760         if (picref) {
1761             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1762             pts_int = picref->pts;
1763             pos     = picref->pos;
1764             frame->opaque = picref;
1765         }
1766
1767         if (av_cmp_q(tb, is->video_st->time_base)) {
1768             av_unused int64_t pts1 = pts_int;
1769             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1770             av_dlog(NULL, "video_thread(): "
1771                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1772                     tb.num, tb.den, pts1,
1773                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1774         }
1775 #else
1776         ret = get_video_frame(is, frame, &pts_int, &pkt);
1777         pos = pkt.pos;
1778         av_free_packet(&pkt);
1779 #endif
1780
1781         if (ret < 0) goto the_end;
1782
#if CONFIG_AVFILTER
1783         if (!picref)
1784             continue;
#endif
1785
1786         pts = pts_int*av_q2d(is->video_st->time_base);
1787
1788         ret = queue_picture(is, frame, pts, pos);
1789
1790         if (ret < 0)
1791             goto the_end;
1792
1793         if (step)
1794             if (cur_stream)
1795                 stream_toggle_pause(cur_stream);
1796     }
1797  the_end:
1798 #if CONFIG_AVFILTER
1799     avfilter_graph_free(&graph);
1800 #endif
1801     av_free(frame);
1802     return 0;
1803 }
1804
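/* Subtitle decoding thread: decode bitmap subtitles, convert their RGBA
   palette entries to YUVA (so they can be blended onto YUV video) and store
   them in the subpq ring buffer, blocking while the queue is full. */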
1805 static int subtitle_thread(void *arg)
1806 {
1807     VideoState *is = arg;
1808     SubPicture *sp;
1809     AVPacket pkt1, *pkt = &pkt1;
1810     int len1 av_unused, got_subtitle;
1811     double pts;
1812     int i, j;
1813     int r, g, b, y, u, v, a;
1814
1815     for(;;) {
1816         while (is->paused && !is->subtitleq.abort_request) {
1817             SDL_Delay(10);
1818         }
1819         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1820             break;
1821
1822         if(pkt->data == flush_pkt.data){
1823             avcodec_flush_buffers(is->subtitle_st->codec);
1824             continue;
1825         }
1826         SDL_LockMutex(is->subpq_mutex);
1827         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1828                !is->subtitleq.abort_request) {
1829             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1830         }
1831         SDL_UnlockMutex(is->subpq_mutex);
1832
1833         if (is->subtitleq.abort_request)
1834             goto the_end;
1835
1836         sp = &is->subpq[is->subpq_windex];
1837
1838         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning in
1839            this packet, if any */
1840         pts = 0;
1841         if (pkt->pts != AV_NOPTS_VALUE)
1842             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1843
1844         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1845                                     &sp->sub, &got_subtitle,
1846                                     pkt);
1847         if (got_subtitle && sp->sub.format == 0) {
1848             sp->pts = pts;
1849
1850             for (i = 0; i < sp->sub.num_rects; i++)
1851             {
1852                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1853                 {
1854                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1855                     y = RGB_TO_Y_CCIR(r, g, b);
1856                     u = RGB_TO_U_CCIR(r, g, b, 0);
1857                     v = RGB_TO_V_CCIR(r, g, b, 0);
1858                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1859                 }
1860             }
1861
1862             /* now we can update the picture count */
1863             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1864                 is->subpq_windex = 0;
1865             SDL_LockMutex(is->subpq_mutex);
1866             is->subpq_size++;
1867             SDL_UnlockMutex(is->subpq_mutex);
1868         }
1869         av_free_packet(pkt);
1870     }
1871  the_end:
1872     return 0;
1873 }
1874
1875 /* copy samples for viewing in editor window */
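/* The samples are appended to is->sample_array, a fixed-size circular buffer
   that wraps at SAMPLE_ARRAY_SIZE; the audio visualization (waves/RDFT
   display) reads from it later. */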
1876 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1877 {
1878     int size, len;
1879
1880     size = samples_size / sizeof(short);
1881     while (size > 0) {
1882         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1883         if (len > size)
1884             len = size;
1885         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1886         samples += len;
1887         is->sample_array_index += len;
1888         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1889             is->sample_array_index = 0;
1890         size -= len;
1891     }
1892 }
1893
1894 /* return the new audio buffer size (samples can be added or removed
1895    to get better sync if the video or an external clock is the master) */
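/* The A-V difference is smoothed with an exponentially weighted average:
   audio_diff_cum = diff + coef * audio_diff_cum and
   avg_diff       = audio_diff_cum * (1 - coef),
   with coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so a measurement
   AUDIO_DIFF_AVG_NB callbacks old only contributes about 1%.  When the
   average exceeds audio_diff_threshold, the buffer is stretched or shrunk
   by diff * sample_rate sample frames, clamped to within
   SAMPLE_CORRECTION_PERCENT_MAX percent (e.g. about 0.05 s of drift at
   44.1 kHz stereo S16 asks for roughly 2205 extra frames = 8820 bytes,
   subject to the +-10% clamp). */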
1896 static int synchronize_audio(VideoState *is, short *samples,
1897                              int samples_size1, double pts)
1898 {
1899     int n, samples_size;
1900     double ref_clock;
1901
1902     n = 2 * is->audio_st->codec->channels;
1903     samples_size = samples_size1;
1904
1905     /* if not master, then we try to remove or add samples to correct the clock */
1906     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1907          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1908         double diff, avg_diff;
1909         int wanted_size, min_size, max_size, nb_samples;
1910
1911         ref_clock = get_master_clock(is);
1912         diff = get_audio_clock(is) - ref_clock;
1913
1914         if (diff < AV_NOSYNC_THRESHOLD) {
1915             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1916             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1917                 /* not enough measurements yet to make a reliable estimate */
1918                 is->audio_diff_avg_count++;
1919             } else {
1920                 /* estimate the A-V difference */
1921                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1922
1923                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1924                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1925                     nb_samples = samples_size / n;
1926
1927                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1928                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1929                     if (wanted_size < min_size)
1930                         wanted_size = min_size;
1931                     else if (wanted_size > max_size)
1932                         wanted_size = max_size;
1933
1934                     /* add or remove samples to correct the synchronization */
1935                     if (wanted_size < samples_size) {
1936                         /* remove samples */
1937                         samples_size = wanted_size;
1938                     } else if (wanted_size > samples_size) {
1939                         uint8_t *samples_end, *q;
1940                         int nb;
1941
1942                         /* add samples */
1943                         nb = (wanted_size - samples_size);
1944                         samples_end = (uint8_t *)samples + samples_size - n;
1945                         q = samples_end + n;
1946                         while (nb > 0) {
1947                             memcpy(q, samples_end, n);
1948                             q += n;
1949                             nb -= n;
1950                         }
1951                         samples_size = wanted_size;
1952                     }
1953                 }
1954 #if 0
1955                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1956                        diff, avg_diff, samples_size - samples_size1,
1957                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1958 #endif
1959             }
1960         } else {
1961             /* the difference is too large: probably initial PTS errors, so
1962                reset the A-V averaging filter */
1963             is->audio_diff_avg_count = 0;
1964             is->audio_diff_cum = 0;
1965         }
1966     }
1967
1968     return samples_size;
1969 }
1970
1971 /* decode one audio frame and return its uncompressed size */
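/* The decoder output goes into audio_buf1; if the decoder's sample format
   is not signed 16-bit, an av_audio_convert context is (re)created to
   convert into audio_buf2, and is->audio_buf is pointed at whichever buffer
   holds S16 data.  is->audio_clock tracks the pts of the end of the data
   returned so far, advancing by data_size / (2 * channels * sample_rate)
   seconds per decoded chunk. */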
1972 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1973 {
1974     AVPacket *pkt_temp = &is->audio_pkt_temp;
1975     AVPacket *pkt = &is->audio_pkt;
1976     AVCodecContext *dec= is->audio_st->codec;
1977     int n, len1, data_size;
1978     double pts;
1979
1980     for(;;) {
1981         /* NOTE: the audio packet can contain several frames */
1982         while (pkt_temp->size > 0) {
1983             data_size = sizeof(is->audio_buf1);
1984             len1 = avcodec_decode_audio3(dec,
1985                                         (int16_t *)is->audio_buf1, &data_size,
1986                                         pkt_temp);
1987             if (len1 < 0) {
1988                 /* if error, we skip the frame */
1989                 pkt_temp->size = 0;
1990                 break;
1991             }
1992
1993             pkt_temp->data += len1;
1994             pkt_temp->size -= len1;
1995             if (data_size <= 0)
1996                 continue;
1997
1998             if (dec->sample_fmt != is->audio_src_fmt) {
1999                 if (is->reformat_ctx)
2000                     av_audio_convert_free(is->reformat_ctx);
2001                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2002                                                          dec->sample_fmt, 1, NULL, 0);
2003                 if (!is->reformat_ctx) {
2004                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2005                         av_get_sample_fmt_name(dec->sample_fmt),
2006                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2007                         break;
2008                 }
2009                 is->audio_src_fmt= dec->sample_fmt;
2010             }
2011
2012             if (is->reformat_ctx) {
2013                 const void *ibuf[6]= {is->audio_buf1};
2014                 void *obuf[6]= {is->audio_buf2};
2015                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2016                 int ostride[6]= {2};
2017                 int len= data_size/istride[0];
2018                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2019                     printf("av_audio_convert() failed\n");
2020                     break;
2021                 }
2022                 is->audio_buf= is->audio_buf2;
2023                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2024                           remove this legacy cruft */
2025                 data_size= len*2;
2026             }else{
2027                 is->audio_buf= is->audio_buf1;
2028             }
2029
2030             /* no per-frame pts is available, so derive it from the running audio clock */
2031             pts = is->audio_clock;
2032             *pts_ptr = pts;
2033             n = 2 * dec->channels;
2034             is->audio_clock += (double)data_size /
2035                 (double)(n * dec->sample_rate);
2036 #ifdef DEBUG
2037             {
2038                 static double last_clock;
2039                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2040                        is->audio_clock - last_clock,
2041                        is->audio_clock, pts);
2042                 last_clock = is->audio_clock;
2043             }
2044 #endif
2045             return data_size;
2046         }
2047
2048         /* free the current packet */
2049         if (pkt->data)
2050             av_free_packet(pkt);
2051
2052         if (is->paused || is->audioq.abort_request) {
2053             return -1;
2054         }
2055
2056         /* read next packet */
2057         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2058             return -1;
2059         if(pkt->data == flush_pkt.data){
2060             avcodec_flush_buffers(dec);
2061             continue;
2062         }
2063
2064         pkt_temp->data = pkt->data;
2065         pkt_temp->size = pkt->size;
2066
2067         /* update the audio clock with the packet pts, if available */
2068         if (pkt->pts != AV_NOPTS_VALUE) {
2069             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2070         }
2071     }
2072 }
2073
2074 /* prepare a new audio buffer */
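/* SDL pulls 'len' bytes from its own audio thread; the callback refills
   is->audio_buf via audio_decode_frame(), lets synchronize_audio() stretch
   or shrink the data to follow the master clock, and falls back to a short
   block of silence when decoding fails so playback timing is preserved. */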
2075 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2076 {
2077     VideoState *is = opaque;
2078     int audio_size, len1;
2079     double pts;
2080
2081     audio_callback_time = av_gettime();
2082
2083     while (len > 0) {
2084         if (is->audio_buf_index >= is->audio_buf_size) {
2085            audio_size = audio_decode_frame(is, &pts);
2086            if (audio_size < 0) {
2087                 /* if error, just output silence */
2088                is->audio_buf = is->audio_buf1;
2089                is->audio_buf_size = 1024;
2090                memset(is->audio_buf, 0, is->audio_buf_size);
2091            } else {
2092                if (is->show_mode != SHOW_MODE_VIDEO)
2093                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2094                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2095                                               pts);
2096                is->audio_buf_size = audio_size;
2097            }
2098            is->audio_buf_index = 0;
2099         }
2100         len1 = is->audio_buf_size - is->audio_buf_index;
2101         if (len1 > len)
2102             len1 = len;
2103         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2104         len -= len1;
2105         stream += len1;
2106         is->audio_buf_index += len1;
2107     }
2108 }
2109
2110 /* open a given stream. Return 0 if OK */
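/* For audio streams this also opens the SDL audio device with signed 16-bit
   output and starts the callback; for video and subtitle streams it
   initializes the corresponding packet queue and spawns the decoding
   thread. */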
2111 static int stream_component_open(VideoState *is, int stream_index)
2112 {
2113     AVFormatContext *ic = is->ic;
2114     AVCodecContext *avctx;
2115     AVCodec *codec;
2116     SDL_AudioSpec wanted_spec, spec;
2117
2118     if (stream_index < 0 || stream_index >= ic->nb_streams)
2119         return -1;
2120     avctx = ic->streams[stream_index]->codec;
2121
2122     /* ask the decoder for at most two channels for audio output */
2123     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2124         if (avctx->channels > 0) {
2125             avctx->request_channels = FFMIN(2, avctx->channels);
2126         } else {
2127             avctx->request_channels = 2;
2128         }
2129     }
2130
2131     codec = avcodec_find_decoder(avctx->codec_id);
2132     if (!codec)
2133         return -1;
2134
2135     avctx->workaround_bugs = workaround_bugs;
2136     avctx->lowres = lowres;
2137     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2138     avctx->idct_algo= idct;
2139     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2140     avctx->skip_frame= skip_frame;
2141     avctx->skip_idct= skip_idct;
2142     avctx->skip_loop_filter= skip_loop_filter;
2143     avctx->error_recognition= error_recognition;
2144     avctx->error_concealment= error_concealment;
2145     avctx->thread_count= thread_count;
2146
2147     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2148
2149     if(codec->capabilities & CODEC_CAP_DR1)
2150         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2151
2152     if (avcodec_open(avctx, codec) < 0)
2153         return -1;
2154
2155     /* prepare audio output */
2156     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2157         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2158             fprintf(stderr, "Invalid sample rate or channel count\n");
2159             return -1;
2160         }
2161         wanted_spec.freq = avctx->sample_rate;
2162         wanted_spec.format = AUDIO_S16SYS;
2163         wanted_spec.channels = avctx->channels;
2164         wanted_spec.silence = 0;
2165         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2166         wanted_spec.callback = sdl_audio_callback;
2167         wanted_spec.userdata = is;
2168         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2169             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2170             return -1;
2171         }
2172         is->audio_hw_buf_size = spec.size;
2173         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2174     }
2175
2176     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2177     switch(avctx->codec_type) {
2178     case AVMEDIA_TYPE_AUDIO:
2179         is->audio_stream = stream_index;
2180         is->audio_st = ic->streams[stream_index];
2181         is->audio_buf_size = 0;
2182         is->audio_buf_index = 0;
2183
2184         /* init averaging filter */
2185         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2186         is->audio_diff_avg_count = 0;
2187         /* since we do not have a precise enough audio FIFO fullness,
2188            we correct audio sync only if the error is larger than this threshold */
2189         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2190
2191         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2192         packet_queue_init(&is->audioq);
2193         SDL_PauseAudio(0);
2194         break;
2195     case AVMEDIA_TYPE_VIDEO:
2196         is->video_stream = stream_index;
2197         is->video_st = ic->streams[stream_index];
2198
2199         packet_queue_init(&is->videoq);
2200         is->video_tid = SDL_CreateThread(video_thread, is);
2201         break;
2202     case AVMEDIA_TYPE_SUBTITLE:
2203         is->subtitle_stream = stream_index;
2204         is->subtitle_st = ic->streams[stream_index];
2205         packet_queue_init(&is->subtitleq);
2206
2207         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2208         break;
2209     default:
2210         break;
2211     }
2212     return 0;
2213 }
2214
2215 static void stream_component_close(VideoState *is, int stream_index)
2216 {
2217     AVFormatContext *ic = is->ic;
2218     AVCodecContext *avctx;
2219
2220     if (stream_index < 0 || stream_index >= ic->nb_streams)
2221         return;
2222     avctx = ic->streams[stream_index]->codec;
2223
2224     switch(avctx->codec_type) {
2225     case AVMEDIA_TYPE_AUDIO:
2226         packet_queue_abort(&is->audioq);
2227
2228         SDL_CloseAudio();
2229
2230         packet_queue_end(&is->audioq);
2231         if (is->reformat_ctx)
2232             av_audio_convert_free(is->reformat_ctx);
2233         is->reformat_ctx = NULL;
2234         break;
2235     case AVMEDIA_TYPE_VIDEO:
2236         packet_queue_abort(&is->videoq);
2237
2238         /* note: we also signal this condition to make sure we unblock the
2239            video thread in all cases */
2240         SDL_LockMutex(is->pictq_mutex);
2241         SDL_CondSignal(is->pictq_cond);
2242         SDL_UnlockMutex(is->pictq_mutex);
2243
2244         SDL_WaitThread(is->video_tid, NULL);
2245
2246         packet_queue_end(&is->videoq);
2247         break;
2248     case AVMEDIA_TYPE_SUBTITLE:
2249         packet_queue_abort(&is->subtitleq);
2250
2251         /* note: we also signal this condition to make sure we unblock the
2252            subtitle thread in all cases */
2253         SDL_LockMutex(is->subpq_mutex);
2254         is->subtitle_stream_changed = 1;
2255
2256         SDL_CondSignal(is->subpq_cond);
2257         SDL_UnlockMutex(is->subpq_mutex);
2258
2259         SDL_WaitThread(is->subtitle_tid, NULL);
2260
2261         packet_queue_end(&is->subtitleq);
2262         break;
2263     default:
2264         break;
2265     }
2266
2267     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2268     avcodec_close(avctx);
2269     switch(avctx->codec_type) {
2270     case AVMEDIA_TYPE_AUDIO:
2271         is->audio_st = NULL;
2272         is->audio_stream = -1;
2273         break;
2274     case AVMEDIA_TYPE_VIDEO:
2275         is->video_st = NULL;
2276         is->video_stream = -1;
2277         break;
2278     case AVMEDIA_TYPE_SUBTITLE:
2279         is->subtitle_st = NULL;
2280         is->subtitle_stream = -1;
2281         break;
2282     default:
2283         break;
2284     }
2285 }
2286
2287 /* since we have only one decoding thread, we can use a global
2288    variable instead of a thread local variable */
2289 static VideoState *global_video_state;
2290
2291 static int decode_interrupt_cb(void)
2292 {
2293     return (global_video_state && global_video_state->abort_request);
2294 }
2295
2296 /* this thread gets the stream from the disk or the network */
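/* Demuxing thread: open the input, pick the best audio/video/subtitle
   streams, open their components, then loop reading packets into the
   per-stream queues.  Reading pauses while the queues hold more than
   MAX_QUEUE_SIZE bytes (or enough packets per stream); seek requests are
   served with avformat_seek_file() followed by a queue flush plus a
   flush_pkt marker, and at EOF playback either loops, exits (autoexit)
   or idles until the user quits. */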
2297 static int read_thread(void *arg)
2298 {
2299     VideoState *is = arg;
2300     AVFormatContext *ic = NULL;
2301     int err, i, ret;
2302     int st_index[AVMEDIA_TYPE_NB];
2303     AVPacket pkt1, *pkt = &pkt1;
2304     int eof=0;
2305     int pkt_in_play_range = 0;
2306     AVDictionaryEntry *t;
2307
2308     memset(st_index, -1, sizeof(st_index));
2309     is->video_stream = -1;
2310     is->audio_stream = -1;
2311     is->subtitle_stream = -1;
2312
2313     global_video_state = is;
2314     avio_set_interrupt_cb(decode_interrupt_cb);
2315
2316     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2317     if (err < 0) {
2318         print_error(is->filename, err);
2319         ret = -1;
2320         goto fail;
2321     }
2322     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2323         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2324         ret = AVERROR_OPTION_NOT_FOUND;
2325         goto fail;
2326     }
2327     is->ic = ic;
2328
2329     if(genpts)
2330         ic->flags |= AVFMT_FLAG_GENPTS;
2331
2332     err = av_find_stream_info(ic);
2333     if (err < 0) {
2334         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2335         ret = -1;
2336         goto fail;
2337     }
2338     if(ic->pb)
2339         ic->pb->eof_reached= 0; //FIXME: hack; ffplay should probably not use url_feof() to test for the end of file
2340
2341     if(seek_by_bytes<0)
2342         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2343
2344     /* if seeking was requested, execute it now */
2345     if (start_time != AV_NOPTS_VALUE) {
2346         int64_t timestamp;
2347
2348         timestamp = start_time;
2349         /* add the stream start time */
2350         if (ic->start_time != AV_NOPTS_VALUE)
2351             timestamp += ic->start_time;
2352         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2353         if (ret < 0) {
2354             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2355                     is->filename, (double)timestamp / AV_TIME_BASE);
2356         }
2357     }
2358
2359     for (i = 0; i < ic->nb_streams; i++)
2360         ic->streams[i]->discard = AVDISCARD_ALL;
2361     if (!video_disable)
2362         st_index[AVMEDIA_TYPE_VIDEO] =
2363             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2364                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2365     if (!audio_disable)
2366         st_index[AVMEDIA_TYPE_AUDIO] =
2367             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2368                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2369                                 st_index[AVMEDIA_TYPE_VIDEO],
2370                                 NULL, 0);
2371     if (!video_disable)
2372         st_index[AVMEDIA_TYPE_SUBTITLE] =
2373             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2374                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2375                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2376                                  st_index[AVMEDIA_TYPE_AUDIO] :
2377                                  st_index[AVMEDIA_TYPE_VIDEO]),
2378                                 NULL, 0);
2379     if (show_status) {
2380         av_dump_format(ic, 0, is->filename, 0);
2381     }
2382
2383     is->show_mode = show_mode;
2384
2385     /* open the streams */
2386     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2387         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2388     }
2389
2390     ret=-1;
2391     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2392         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2393     }
2394     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2395     if (is->show_mode == SHOW_MODE_NONE)
2396         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2397
2398     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2399         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2400     }
2401
2402     if (is->video_stream < 0 && is->audio_stream < 0) {
2403         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2404         ret = -1;
2405         goto fail;
2406     }
2407
2408     for(;;) {
2409         if (is->abort_request)
2410             break;
2411         if (is->paused != is->last_paused) {
2412             is->last_paused = is->paused;
2413             if (is->paused)
2414                 is->read_pause_return= av_read_pause(ic);
2415             else
2416                 av_read_play(ic);
2417         }
2418 #if CONFIG_RTSP_DEMUXER
2419         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2420             /* wait 10 ms to avoid trying to get another packet */
2421             /* XXX: horrible */
2422             SDL_Delay(10);
2423             continue;
2424         }
2425 #endif
2426         if (is->seek_req) {
2427             int64_t seek_target= is->seek_pos;
2428             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2429             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2430 //FIXME: the +-2 compensates for rounding not being done in the correct direction
2431 //       when the seek_pos/seek_rel variables are generated
2432
2433             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2434             if (ret < 0) {
2435                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2436             }else{
2437                 if (is->audio_stream >= 0) {
2438                     packet_queue_flush(&is->audioq);
2439                     packet_queue_put(&is->audioq, &flush_pkt);
2440                 }
2441                 if (is->subtitle_stream >= 0) {
2442                     packet_queue_flush(&is->subtitleq);
2443                     packet_queue_put(&is->subtitleq, &flush_pkt);
2444                 }
2445                 if (is->video_stream >= 0) {
2446                     packet_queue_flush(&is->videoq);
2447                     packet_queue_put(&is->videoq, &flush_pkt);
2448                 }
2449             }
2450             is->seek_req = 0;
2451             eof= 0;
2452         }
2453
2454         /* if the queues are full, no need to read more */
2455         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2456             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2457                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2458                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2459             /* wait 10 ms */
2460             SDL_Delay(10);
2461             continue;
2462         }
2463         if(eof) {
2464             if(is->video_stream >= 0){
2465                 av_init_packet(pkt);
2466                 pkt->data=NULL;
2467                 pkt->size=0;
2468                 pkt->stream_index= is->video_stream;
2469                 packet_queue_put(&is->videoq, pkt);
2470             }
2471             SDL_Delay(10);
2472             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2473                 if(loop!=1 && (!loop || --loop)){
2474                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2475                 }else if(autoexit){
2476                     ret=AVERROR_EOF;
2477                     goto fail;
2478                 }
2479             }
2480             eof=0;
2481             continue;
2482         }
2483         ret = av_read_frame(ic, pkt);
2484         if (ret < 0) {
2485             if (ret == AVERROR_EOF || url_feof(ic->pb))
2486                 eof=1;
2487             if (ic->pb && ic->pb->error)
2488                 break;
2489             SDL_Delay(100); /* wait for user event */
2490             continue;
2491         }
2492         /* check if the packet is inside the user-specified play range; queue it if so, otherwise discard it */
2493         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2494                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2495                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2496                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2497                 <= ((double)duration/1000000);
2498         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2499             packet_queue_put(&is->audioq, pkt);
2500         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2501             packet_queue_put(&is->videoq, pkt);
2502         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2503             packet_queue_put(&is->subtitleq, pkt);
2504         } else {
2505             av_free_packet(pkt);
2506         }
2507     }
2508     /* wait until the end */
2509     while (!is->abort_request) {
2510         SDL_Delay(100);
2511     }
2512
2513     ret = 0;
2514  fail:
2515     /* disable interrupting */
2516     global_video_state = NULL;
2517
2518     /* close each stream */
2519     if (is->audio_stream >= 0)
2520         stream_component_close(is, is->audio_stream);
2521     if (is->video_stream >= 0)
2522         stream_component_close(is, is->video_stream);
2523     if (is->subtitle_stream >= 0)
2524         stream_component_close(is, is->subtitle_stream);
2525     if (is->ic) {
2526         av_close_input_file(is->ic);
2527         is->ic = NULL; /* safety */
2528     }
2529     avio_set_interrupt_cb(NULL);
2530
2531     if (ret != 0) {
2532         SDL_Event event;
2533
2534         event.type = FF_QUIT_EVENT;
2535         event.user.data1 = is;
2536         SDL_PushEvent(&event);
2537     }
2538     return 0;
2539 }
2540
2541 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2542 {
2543     VideoState *is;
2544
2545     is = av_mallocz(sizeof(VideoState));
2546     if (!is)
2547         return NULL;
2548     av_strlcpy(is->filename, filename, sizeof(is->filename));
2549     is->iformat = iformat;
2550     is->ytop = 0;
2551     is->xleft = 0;
2552
2553     /* start video display */
2554     is->pictq_mutex = SDL_CreateMutex();
2555     is->pictq_cond = SDL_CreateCond();
2556
2557     is->subpq_mutex = SDL_CreateMutex();
2558     is->subpq_cond = SDL_CreateCond();
2559
2560     is->av_sync_type = av_sync_type;
2561     is->read_tid = SDL_CreateThread(read_thread, is);
2562     if (!is->read_tid) {
2563         av_free(is);
2564         return NULL;
2565     }
2566     return is;
2567 }
2568
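/* Switch to the next stream of the given type (wrapping around), used by
   the 'a'/'v'/'t' keys; for subtitles the cycle may end on "no stream". */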
2569 static void stream_cycle_channel(VideoState *is, int codec_type)
2570 {
2571     AVFormatContext *ic = is->ic;
2572     int start_index, stream_index;
2573     AVStream *st;
2574
2575     if (codec_type == AVMEDIA_TYPE_VIDEO)
2576         start_index = is->video_stream;
2577     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2578         start_index = is->audio_stream;
2579     else
2580         start_index = is->subtitle_stream;
2581     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2582         return;
2583     stream_index = start_index;
2584     for(;;) {
2585         if (++stream_index >= is->ic->nb_streams)
2586         {
2587             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2588             {
2589                 stream_index = -1;
2590                 goto the_end;
2591             } else
2592                 stream_index = 0;
2593         }
2594         if (stream_index == start_index)
2595             return;
2596         st = ic->streams[stream_index];
2597         if (st->codec->codec_type == codec_type) {
2598             /* check that parameters are OK */
2599             switch(codec_type) {
2600             case AVMEDIA_TYPE_AUDIO:
2601                 if (st->codec->sample_rate != 0 &&
2602                     st->codec->channels != 0)
2603                     goto the_end;
2604                 break;
2605             case AVMEDIA_TYPE_VIDEO:
2606             case AVMEDIA_TYPE_SUBTITLE:
2607                 goto the_end;
2608             default:
2609                 break;
2610             }
2611         }
2612     }
2613  the_end:
2614     stream_component_close(is, start_index);
2615     stream_component_open(is, stream_index);
2616 }
2617
2618
2619 static void toggle_full_screen(void)
2620 {
2621     is_full_screen = !is_full_screen;
2622     video_open(cur_stream);
2623 }
2624
2625 static void toggle_pause(void)
2626 {
2627     if (cur_stream)
2628         stream_toggle_pause(cur_stream);
2629     step = 0;
2630 }
2631
2632 static void step_to_next_frame(void)
2633 {
2634     if (cur_stream) {
2635         /* if the stream is paused, unpause it, then step */
2636         if (cur_stream->paused)
2637             stream_toggle_pause(cur_stream);
2638     }
2639     step = 1;
2640 }
2641
2642 static void toggle_audio_display(void)
2643 {
2644     if (cur_stream) {
2645         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2646         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2647         fill_rectangle(screen,
2648                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2649                     bgcolor);
2650         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2651     }
2652 }
2653
2654 /* handle an event sent by the GUI */
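/* Main SDL event loop: keyboard keys map to pause/step/stream cycling and
   relative seeks (left/right = 10 s, up/down = 1 min, or byte-based when
   seeking by bytes), mouse clicks seek to the clicked fraction of the file,
   and FF_ALLOC/FF_REFRESH events drive picture allocation and display. */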
2655 static void event_loop(void)
2656 {
2657     SDL_Event event;
2658     double incr, pos, frac;
2659
2660     for(;;) {
2661         double x;
2662         SDL_WaitEvent(&event);
2663         switch(event.type) {
2664         case SDL_KEYDOWN:
2665             if (exit_on_keydown) {
2666                 do_exit();
2667                 break;
2668             }
2669             switch(event.key.keysym.sym) {
2670             case SDLK_ESCAPE:
2671             case SDLK_q:
2672                 do_exit();
2673                 break;
2674             case SDLK_f:
2675                 toggle_full_screen();
2676                 break;
2677             case SDLK_p:
2678             case SDLK_SPACE:
2679                 toggle_pause();
2680                 break;
2681             case SDLK_s: //S: Step to next frame
2682                 step_to_next_frame();
2683                 break;
2684             case SDLK_a:
2685                 if (cur_stream)
2686                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2687                 break;
2688             case SDLK_v:
2689                 if (cur_stream)
2690                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2691                 break;
2692             case SDLK_t:
2693                 if (cur_stream)
2694                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2695                 break;
2696             case SDLK_w:
2697                 toggle_audio_display();
2698                 break;
2699             case SDLK_LEFT:
2700                 incr = -10.0;
2701                 goto do_seek;
2702             case SDLK_RIGHT:
2703                 incr = 10.0;
2704                 goto do_seek;
2705             case SDLK_UP:
2706                 incr = 60.0;
2707                 goto do_seek;
2708             case SDLK_DOWN:
2709                 incr = -60.0;
2710             do_seek:
2711                 if (cur_stream) {
2712                     if (seek_by_bytes) {
2713                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2714                             pos= cur_stream->video_current_pos;
2715                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2716                             pos= cur_stream->audio_pkt.pos;
2717                         }else
2718                             pos = avio_tell(cur_stream->ic->pb);
2719                         if (cur_stream->ic->bit_rate)
2720                             incr *= cur_stream->ic->bit_rate / 8.0;
2721                         else
2722                             incr *= 180000.0;
2723                         pos += incr;
2724                         stream_seek(cur_stream, pos, incr, 1);
2725                     } else {
2726                         pos = get_master_clock(cur_stream);
2727                         pos += incr;
2728                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2729                     }
2730                 }
2731                 break;
2732             default:
2733                 break;
2734             }
2735             break;
2736         case SDL_MOUSEBUTTONDOWN:
2737             if (exit_on_mousedown) {
2738                 do_exit();
2739                 break;
2740             }
2741         case SDL_MOUSEMOTION:
2742             if(event.type ==SDL_MOUSEBUTTONDOWN){
2743                 x= event.button.x;
2744             }else{
2745                 if(event.motion.state != SDL_PRESSED)
2746                     break;
2747                 x= event.motion.x;
2748             }
2749             if (cur_stream) {
2750                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2751                     uint64_t size=  avio_size(cur_stream->ic->pb);
2752                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2753                 }else{
2754                     int64_t ts;
2755                     int ns, hh, mm, ss;
2756                     int tns, thh, tmm, tss;
2757                     tns = cur_stream->ic->duration/1000000LL;
2758                     thh = tns/3600;
2759                     tmm = (tns%3600)/60;
2760                     tss = (tns%60);
2761                     frac = x/cur_stream->width;
2762                     ns = frac*tns;
2763                     hh = ns/3600;
2764                     mm = (ns%3600)/60;
2765                     ss = (ns%60);
2766                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2767                             hh, mm, ss, thh, tmm, tss);
2768                     ts = frac*cur_stream->ic->duration;
2769                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2770                         ts += cur_stream->ic->start_time;
2771                     stream_seek(cur_stream, ts, 0, 0);
2772                 }
2773             }
2774             break;
2775         case SDL_VIDEORESIZE:
2776             if (cur_stream) {
2777                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2778                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2779                 screen_width = cur_stream->width = event.resize.w;
2780                 screen_height= cur_stream->height= event.resize.h;
2781             }
2782             break;
2783         case SDL_QUIT:
2784         case FF_QUIT_EVENT:
2785             do_exit();
2786             break;
2787         case FF_ALLOC_EVENT:
2788             video_open(event.user.data1);
2789             alloc_picture(event.user.data1);
2790             break;
2791         case FF_REFRESH_EVENT:
2792             video_refresh(event.user.data1);
2793             cur_stream->refresh=0;
2794             break;
2795         default:
2796             break;
2797         }
2798     }
2799 }
2800
2801 static int opt_frame_size(const char *opt, const char *arg)
2802 {
2803     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2804         fprintf(stderr, "Incorrect frame size\n");
2805         return AVERROR(EINVAL);
2806     }
2807     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2808         fprintf(stderr, "Frame size must be a multiple of 2\n");
2809         return AVERROR(EINVAL);
2810     }
2811     return 0;
2812 }
2813
2814 static int opt_width(const char *opt, const char *arg)
2815 {
2816     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2817     return 0;
2818 }
2819
2820 static int opt_height(const char *opt, const char *arg)
2821 {
2822     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2823     return 0;
2824 }
2825
2826 static int opt_format(const char *opt, const char *arg)
2827 {
2828     file_iformat = av_find_input_format(arg);
2829     if (!file_iformat) {
2830         fprintf(stderr, "Unknown input format: %s\n", arg);
2831         return AVERROR(EINVAL);
2832     }
2833     return 0;
2834 }
2835
2836 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2837 {
2838     frame_pix_fmt = av_get_pix_fmt(arg);
2839     return 0;
2840 }
2841
2842 static int opt_sync(const char *opt, const char *arg)
2843 {
2844     if (!strcmp(arg, "audio"))
2845         av_sync_type = AV_SYNC_AUDIO_MASTER;
2846     else if (!strcmp(arg, "video"))
2847         av_sync_type = AV_SYNC_VIDEO_MASTER;
2848     else if (!strcmp(arg, "ext"))
2849         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2850     else {
2851         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2852         exit(1);
2853     }
2854     return 0;
2855 }
2856
2857 static int opt_seek(const char *opt, const char *arg)
2858 {
2859     start_time = parse_time_or_die(opt, arg, 1);
2860     return 0;
2861 }
2862
2863 static int opt_duration(const char *opt, const char *arg)
2864 {
2865     duration = parse_time_or_die(opt, arg, 1);
2866     return 0;
2867 }
2868
2869 static int opt_thread_count(const char *opt, const char *arg)
2870 {
2871     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2872 #if !HAVE_THREADS
2873     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2874 #endif
2875     return 0;
2876 }
2877
2878 static int opt_show_mode(const char *opt, const char *arg)
2879 {
2880     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2881                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2882                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2883                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2884     return 0;
2885 }
2886
2887 static int opt_input_file(const char *opt, const char *filename)
2888 {
2889     if (input_filename) {
2890         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2891                 filename, input_filename);
2892         exit(1);
2893     }
2894     if (!strcmp(filename, "-"))
2895         filename = "pipe:";
2896     input_filename = filename;
2897     return 0;
2898 }
2899
2900 static const OptionDef options[] = {
2901 #include "cmdutils_common_opts.h"
2902     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2903     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2904     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2905     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2906     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2907     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2908     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2909     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2910     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2911     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2912     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2913     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2914     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2915     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2916     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2917     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2918     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2919     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2920     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2921     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2922     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2923     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2924     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2925     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2926     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2927     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2928     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2929     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2930     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2931     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2932     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2933     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2934     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2935     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2936     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2937 #if CONFIG_AVFILTER
2938     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2939 #endif
2940     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2941     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2942     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2943     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2944     { NULL, },
2945 };
2946
2947 static void show_usage(void)
2948 {
2949     printf("Simple media player\n");
2950     printf("usage: ffplay [options] input_file\n");
2951     printf("\n");
2952 }
2953
2954 static int opt_help(const char *opt, const char *arg)
2955 {
2956     av_log_set_callback(log_callback_help);
2957     show_usage();
2958     show_help_options(options, "Main options:\n",
2959                       OPT_EXPERT, 0);
2960     show_help_options(options, "\nAdvanced options:\n",
2961                       OPT_EXPERT, OPT_EXPERT);
2962     printf("\n");
2963     av_opt_show2(avcodec_opts[0], NULL,
2964                  AV_OPT_FLAG_DECODING_PARAM, 0);
2965     printf("\n");
2966     av_opt_show2(avformat_opts, NULL,
2967                  AV_OPT_FLAG_DECODING_PARAM, 0);
2968 #if !CONFIG_AVFILTER
2969     printf("\n");
2970     av_opt_show2(sws_opts, NULL,
2971                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2972 #endif
2973     printf("\nWhile playing:\n"
2974            "q, ESC              quit\n"
2975            "f                   toggle full screen\n"
2976            "p, SPC              pause\n"
2977            "a                   cycle audio channel\n"
2978            "v                   cycle video channel\n"
2979            "t                   cycle subtitle channel\n"
2980            "w                   show audio waves\n"
2981            "s                   activate frame-step mode\n"
2982            "left/right          seek backward/forward 10 seconds\n"
2983            "down/up             seek backward/forward 1 minute\n"
2984            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2985            );
2986     return 0;
2987 }
2988
2989 /* the program entry point */
2990 int main(int argc, char **argv)
2991 {
2992     int flags;
2993
2994     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2995
2996     /* register all codecs, demuxers and protocols */
2997     avcodec_register_all();
2998 #if CONFIG_AVDEVICE
2999     avdevice_register_all();
3000 #endif
3001 #if CONFIG_AVFILTER
3002     avfilter_register_all();
3003 #endif
3004     av_register_all();
3005
3006     init_opts();
3007
3008     show_banner();
3009
3010     parse_options(argc, argv, options, opt_input_file);
3011
3012     if (!input_filename) {
3013         show_usage();
3014         fprintf(stderr, "An input file must be specified\n");
3015         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3016         exit(1);
3017     }
3018
3019     if (display_disable) {
3020         video_disable = 1;
3021     }
3022     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3023     if (audio_disable)
3024         flags &= ~SDL_INIT_AUDIO;
3025 #if !defined(__MINGW32__) && !defined(__APPLE__)
3026     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3027 #endif
3028     if (SDL_Init (flags)) {
3029         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3030         exit(1);
3031     }
3032
3033     if (!display_disable) {
3034 #if HAVE_SDL_VIDEO_SIZE
3035         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3036         fs_screen_width = vi->current_w;
3037         fs_screen_height = vi->current_h;
3038 #endif
3039     }
3040
3041     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3042     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3043     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3044
3045     av_init_packet(&flush_pkt);
3046     flush_pkt.data= "FLUSH";
3047
3048     cur_stream = stream_open(input_filename, file_iformat);
3049
3050     event_loop();
3051
3052     /* never returns */
3053
3054     return 0;
3055 }