ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. we reserve more space for avsync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static int opt_help(const char *opt, const char *arg);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int frame_width = 0;
227 static int frame_height = 0;
228 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
229 static int audio_disable;
230 static int video_disable;
231 static int wanted_stream[AVMEDIA_TYPE_NB]={
232     [AVMEDIA_TYPE_AUDIO]=-1,
233     [AVMEDIA_TYPE_VIDEO]=-1,
234     [AVMEDIA_TYPE_SUBTITLE]=-1,
235 };
236 static int seek_by_bytes=-1;
237 static int display_disable;
238 static int show_status = 1;
239 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
240 static int64_t start_time = AV_NOPTS_VALUE;
241 static int64_t duration = AV_NOPTS_VALUE;
242 static int step = 0;
243 static int thread_count = 1;
244 static int workaround_bugs = 1;
245 static int fast = 0;
246 static int genpts = 0;
247 static int lowres = 0;
248 static int idct = FF_IDCT_AUTO;
249 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
250 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
252 static int error_recognition = FF_ER_CAREFUL;
253 static int error_concealment = 3;
254 static int decoder_reorder_pts= -1;
255 static int autoexit;
256 static int exit_on_keydown;
257 static int exit_on_mousedown;
258 static int loop=1;
259 static int framedrop=-1;
260 static enum ShowMode show_mode = SHOW_MODE_NONE;
261
262 static int rdftspeed=20;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static VideoState *cur_stream;
270 static int64_t audio_callback_time;
271
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
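/* append a packet to the queue (duplicating it unless it is the flush packet) and wake up any waiting reader */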
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
281 {
282     AVPacketList *pkt1;
283
284     /* duplicate the packet */
285     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
286         return -1;
287
288     pkt1 = av_malloc(sizeof(AVPacketList));
289     if (!pkt1)
290         return -1;
291     pkt1->pkt = *pkt;
292     pkt1->next = NULL;
293
294
295     SDL_LockMutex(q->mutex);
296
297     if (!q->last_pkt)
298
299         q->first_pkt = pkt1;
300     else
301         q->last_pkt->next = pkt1;
302     q->last_pkt = pkt1;
303     q->nb_packets++;
304     q->size += pkt1->pkt.size + sizeof(*pkt1);
305     /* XXX: should duplicate packet data in DV case */
306     SDL_CondSignal(q->cond);
307
308     SDL_UnlockMutex(q->mutex);
309     return 0;
310 }
311
312 /* packet queue handling */
313 static void packet_queue_init(PacketQueue *q)
314 {
315     memset(q, 0, sizeof(PacketQueue));
316     q->mutex = SDL_CreateMutex();
317     q->cond = SDL_CreateCond();
318     packet_queue_put(q, &flush_pkt);
319 }
320
321 static void packet_queue_flush(PacketQueue *q)
322 {
323     AVPacketList *pkt, *pkt1;
324
325     SDL_LockMutex(q->mutex);
326     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
327         pkt1 = pkt->next;
328         av_free_packet(&pkt->pkt);
329         av_freep(&pkt);
330     }
331     q->last_pkt = NULL;
332     q->first_pkt = NULL;
333     q->nb_packets = 0;
334     q->size = 0;
335     SDL_UnlockMutex(q->mutex);
336 }
337
338 static void packet_queue_end(PacketQueue *q)
339 {
340     packet_queue_flush(q);
341     SDL_DestroyMutex(q->mutex);
342     SDL_DestroyCond(q->cond);
343 }
344
345 static void packet_queue_abort(PacketQueue *q)
346 {
347     SDL_LockMutex(q->mutex);
348
349     q->abort_request = 1;
350
351     SDL_CondSignal(q->cond);
352
353     SDL_UnlockMutex(q->mutex);
354 }
355
356 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
357 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
358 {
359     AVPacketList *pkt1;
360     int ret;
361
362     SDL_LockMutex(q->mutex);
363
364     for(;;) {
365         if (q->abort_request) {
366             ret = -1;
367             break;
368         }
369
370         pkt1 = q->first_pkt;
371         if (pkt1) {
372             q->first_pkt = pkt1->next;
373             if (!q->first_pkt)
374                 q->last_pkt = NULL;
375             q->nb_packets--;
376             q->size -= pkt1->pkt.size + sizeof(*pkt1);
377             *pkt = pkt1->pkt;
378             av_free(pkt1);
379             ret = 1;
380             break;
381         } else if (!block) {
382             ret = 0;
383             break;
384         } else {
385             SDL_CondWait(q->cond, q->mutex);
386         }
387     }
388     SDL_UnlockMutex(q->mutex);
389     return ret;
390 }
391
392 static inline void fill_rectangle(SDL_Surface *screen,
393                                   int x, int y, int w, int h, int color)
394 {
395     SDL_Rect rect;
396     rect.x = x;
397     rect.y = y;
398     rect.w = w;
399     rect.h = h;
400     SDL_FillRect(screen, &rect, color);
401 }
402
403 #define ALPHA_BLEND(a, oldp, newp, s)\
404 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
405
406 #define RGBA_IN(r, g, b, a, s)\
407 {\
408     unsigned int v = ((const uint32_t *)(s))[0];\
409     a = (v >> 24) & 0xff;\
410     r = (v >> 16) & 0xff;\
411     g = (v >> 8) & 0xff;\
412     b = v & 0xff;\
413 }
414
415 #define YUVA_IN(y, u, v, a, s, pal)\
416 {\
417     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
418     a = (val >> 24) & 0xff;\
419     y = (val >> 16) & 0xff;\
420     u = (val >> 8) & 0xff;\
421     v = val & 0xff;\
422 }
423
424 #define YUVA_OUT(d, y, u, v, a)\
425 {\
426     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 }
428
429
430 #define BPP 1
431
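/* blend a palettized subtitle rectangle onto a YUV420P picture, applying the per-pixel alpha from the palette */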
432 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
433 {
434     int wrap, wrap3, width2, skip2;
435     int y, u, v, a, u1, v1, a1, w, h;
436     uint8_t *lum, *cb, *cr;
437     const uint8_t *p;
438     const uint32_t *pal;
439     int dstx, dsty, dstw, dsth;
440
441     dstw = av_clip(rect->w, 0, imgw);
442     dsth = av_clip(rect->h, 0, imgh);
443     dstx = av_clip(rect->x, 0, imgw - dstw);
444     dsty = av_clip(rect->y, 0, imgh - dsth);
445     lum = dst->data[0] + dsty * dst->linesize[0];
446     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
447     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
448
449     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
450     skip2 = dstx >> 1;
451     wrap = dst->linesize[0];
452     wrap3 = rect->pict.linesize[0];
453     p = rect->pict.data[0];
454     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
455
456     if (dsty & 1) {
457         lum += dstx;
458         cb += skip2;
459         cr += skip2;
460
461         if (dstx & 1) {
462             YUVA_IN(y, u, v, a, p, pal);
463             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
464             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
465             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
466             cb++;
467             cr++;
468             lum++;
469             p += BPP;
470         }
471         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
472             YUVA_IN(y, u, v, a, p, pal);
473             u1 = u;
474             v1 = v;
475             a1 = a;
476             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
477
478             YUVA_IN(y, u, v, a, p + BPP, pal);
479             u1 += u;
480             v1 += v;
481             a1 += a;
482             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
483             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
484             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
485             cb++;
486             cr++;
487             p += 2 * BPP;
488             lum += 2;
489         }
490         if (w) {
491             YUVA_IN(y, u, v, a, p, pal);
492             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
493             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
494             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
495             p++;
496             lum++;
497         }
498         p += wrap3 - dstw * BPP;
499         lum += wrap - dstw - dstx;
500         cb += dst->linesize[1] - width2 - skip2;
501         cr += dst->linesize[2] - width2 - skip2;
502     }
503     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504         lum += dstx;
505         cb += skip2;
506         cr += skip2;
507
508         if (dstx & 1) {
509             YUVA_IN(y, u, v, a, p, pal);
510             u1 = u;
511             v1 = v;
512             a1 = a;
513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514             p += wrap3;
515             lum += wrap;
516             YUVA_IN(y, u, v, a, p, pal);
517             u1 += u;
518             v1 += v;
519             a1 += a;
520             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523             cb++;
524             cr++;
525             p += -wrap3 + BPP;
526             lum += -wrap + 1;
527         }
528         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529             YUVA_IN(y, u, v, a, p, pal);
530             u1 = u;
531             v1 = v;
532             a1 = a;
533             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534
535             YUVA_IN(y, u, v, a, p + BPP, pal);
536             u1 += u;
537             v1 += v;
538             a1 += a;
539             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540             p += wrap3;
541             lum += wrap;
542
543             YUVA_IN(y, u, v, a, p, pal);
544             u1 += u;
545             v1 += v;
546             a1 += a;
547             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548
549             YUVA_IN(y, u, v, a, p + BPP, pal);
550             u1 += u;
551             v1 += v;
552             a1 += a;
553             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554
555             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557
558             cb++;
559             cr++;
560             p += -wrap3 + 2 * BPP;
561             lum += -wrap + 2;
562         }
563         if (w) {
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 = u;
566             v1 = v;
567             a1 = a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             p += wrap3;
570             lum += wrap;
571             YUVA_IN(y, u, v, a, p, pal);
572             u1 += u;
573             v1 += v;
574             a1 += a;
575             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578             cb++;
579             cr++;
580             p += -wrap3 + BPP;
581             lum += -wrap + 1;
582         }
583         p += wrap3 + (wrap3 - dstw * BPP);
584         lum += wrap + (wrap - dstw - dstx);
585         cb += dst->linesize[1] - width2 - skip2;
586         cr += dst->linesize[2] - width2 - skip2;
587     }
588     /* handle odd height */
589     if (h) {
590         lum += dstx;
591         cb += skip2;
592         cr += skip2;
593
594         if (dstx & 1) {
595             YUVA_IN(y, u, v, a, p, pal);
596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599             cb++;
600             cr++;
601             lum++;
602             p += BPP;
603         }
604         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605             YUVA_IN(y, u, v, a, p, pal);
606             u1 = u;
607             v1 = v;
608             a1 = a;
609             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610
611             YUVA_IN(y, u, v, a, p + BPP, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618             cb++;
619             cr++;
620             p += 2 * BPP;
621             lum += 2;
622         }
623         if (w) {
624             YUVA_IN(y, u, v, a, p, pal);
625             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628         }
629     }
630 }
631
632 static void free_subpicture(SubPicture *sp)
633 {
634     avsubtitle_free(&sp->sub);
635 }
636
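/* display the current picture from the queue, blending any pending subtitle and letterboxing the result into the window while preserving the aspect ratio */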
637 static void video_image_display(VideoState *is)
638 {
639     VideoPicture *vp;
640     SubPicture *sp;
641     AVPicture pict;
642     float aspect_ratio;
643     int width, height, x, y;
644     SDL_Rect rect;
645     int i;
646
647     vp = &is->pictq[is->pictq_rindex];
648     if (vp->bmp) {
649 #if CONFIG_AVFILTER
650          if (vp->picref->video->sample_aspect_ratio.num == 0)
651              aspect_ratio = 0;
652          else
653              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
654 #else
655
656         /* XXX: use variable in the frame */
657         if (is->video_st->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
659         else if (is->video_st->codec->sample_aspect_ratio.num)
660             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
661         else
662             aspect_ratio = 0;
663 #endif
664         if (aspect_ratio <= 0.0)
665             aspect_ratio = 1.0;
666         aspect_ratio *= (float)vp->width / (float)vp->height;
667
668         if (is->subtitle_st) {
669             if (is->subpq_size > 0) {
670                 sp = &is->subpq[is->subpq_rindex];
671
672                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
673                     SDL_LockYUVOverlay (vp->bmp);
674
675                     pict.data[0] = vp->bmp->pixels[0];
676                     pict.data[1] = vp->bmp->pixels[2];
677                     pict.data[2] = vp->bmp->pixels[1];
678
679                     pict.linesize[0] = vp->bmp->pitches[0];
680                     pict.linesize[1] = vp->bmp->pitches[2];
681                     pict.linesize[2] = vp->bmp->pitches[1];
682
683                     for (i = 0; i < sp->sub.num_rects; i++)
684                         blend_subrect(&pict, sp->sub.rects[i],
685                                       vp->bmp->w, vp->bmp->h);
686
687                     SDL_UnlockYUVOverlay (vp->bmp);
688                 }
689             }
690         }
691
692
693         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
694         height = is->height;
695         width = ((int)rint(height * aspect_ratio)) & ~1;
696         if (width > is->width) {
697             width = is->width;
698             height = ((int)rint(width / aspect_ratio)) & ~1;
699         }
700         x = (is->width - width) / 2;
701         y = (is->height - height) / 2;
702         is->no_background = 0;
703         rect.x = is->xleft + x;
704         rect.y = is->ytop  + y;
705         rect.w = FFMAX(width,  1);
706         rect.h = FFMAX(height, 1);
707         SDL_DisplayYUVOverlay(vp->bmp, &rect);
708     }
709 }
710
711 /* get the current audio output buffer size, in bytes. With SDL, we
712    cannot get precise information */
713 static int audio_write_get_buf_size(VideoState *is)
714 {
715     return is->audio_buf_size - is->audio_buf_index;
716 }
717
718 static inline int compute_mod(int a, int b)
719 {
720     return a < 0 ? a%b + b : a%b;
721 }
722
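/* draw the audio visualization: either the waveform or an RDFT spectrogram of the most recently played samples */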
723 static void video_audio_display(VideoState *s)
724 {
725     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
726     int ch, channels, h, h2, bgcolor, fgcolor;
727     int64_t time_diff;
728     int rdft_bits, nb_freq;
729
730     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
731         ;
732     nb_freq= 1<<(rdft_bits-1);
733
734     /* compute display index : center on currently output samples */
735     channels = s->audio_st->codec->channels;
736     nb_display_channels = channels;
737     if (!s->paused) {
738         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
739         n = 2 * channels;
740         delay = audio_write_get_buf_size(s);
741         delay /= n;
742
743         /* to be more precise, we take into account the time spent since
744            the last buffer computation */
745         if (audio_callback_time) {
746             time_diff = av_gettime() - audio_callback_time;
747             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
748         }
749
750         delay += 2*data_used;
751         if (delay < data_used)
752             delay = data_used;
753
754         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
755         if (s->show_mode == SHOW_MODE_WAVES) {
756             h= INT_MIN;
757             for(i=0; i<1000; i+=channels){
758                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
759                 int a= s->sample_array[idx];
760                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
761                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
762                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
763                 int score= a-d;
764                 if(h<score && (b^c)<0){
765                     h= score;
766                     i_start= idx;
767                 }
768             }
769         }
770
771         s->last_i_start = i_start;
772     } else {
773         i_start = s->last_i_start;
774     }
775
776     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
777     if (s->show_mode == SHOW_MODE_WAVES) {
778         fill_rectangle(screen,
779                        s->xleft, s->ytop, s->width, s->height,
780                        bgcolor);
781
782         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
783
784         /* total height for one channel */
785         h = s->height / nb_display_channels;
786         /* graph height / 2 */
787         h2 = (h * 9) / 20;
788         for(ch = 0;ch < nb_display_channels; ch++) {
789             i = i_start + ch;
790             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
791             for(x = 0; x < s->width; x++) {
792                 y = (s->sample_array[i] * h2) >> 15;
793                 if (y < 0) {
794                     y = -y;
795                     ys = y1 - y;
796                 } else {
797                     ys = y1;
798                 }
799                 fill_rectangle(screen,
800                                s->xleft + x, ys, 1, y,
801                                fgcolor);
802                 i += channels;
803                 if (i >= SAMPLE_ARRAY_SIZE)
804                     i -= SAMPLE_ARRAY_SIZE;
805             }
806         }
807
808         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
809
810         for(ch = 1;ch < nb_display_channels; ch++) {
811             y = s->ytop + ch * h;
812             fill_rectangle(screen,
813                            s->xleft, y, s->width, 1,
814                            fgcolor);
815         }
816         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
817     }else{
818         nb_display_channels= FFMIN(nb_display_channels, 2);
819         if(rdft_bits != s->rdft_bits){
820             av_rdft_end(s->rdft);
821             av_free(s->rdft_data);
822             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
823             s->rdft_bits= rdft_bits;
824             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
825         }
826         {
827             FFTSample *data[2];
828             for(ch = 0;ch < nb_display_channels; ch++) {
829                 data[ch] = s->rdft_data + 2*nb_freq*ch;
830                 i = i_start + ch;
831                 for(x = 0; x < 2*nb_freq; x++) {
832                     double w= (x-nb_freq)*(1.0/nb_freq);
833                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
834                     i += channels;
835                     if (i >= SAMPLE_ARRAY_SIZE)
836                         i -= SAMPLE_ARRAY_SIZE;
837                 }
838                 av_rdft_calc(s->rdft, data[ch]);
839             }
840             //least efficient way to do this; we should of course directly access it, but it's more than fast enough
841             for(y=0; y<s->height; y++){
842                 double w= 1/sqrt(nb_freq);
843                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
844                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
845                        + data[1][2*y+1]*data[1][2*y+1])) : a;
846                 a= FFMIN(a,255);
847                 b= FFMIN(b,255);
848                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
849
850                 fill_rectangle(screen,
851                             s->xpos, s->height-y, 1, 1,
852                             fgcolor);
853             }
854         }
855         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
856         s->xpos++;
857         if(s->xpos >= s->width)
858             s->xpos= s->xleft;
859     }
860 }
861
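/* stop the read and refresh threads, free the queued pictures and synchronization primitives, and release the VideoState */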
862 static void stream_close(VideoState *is)
863 {
864     VideoPicture *vp;
865     int i;
866     /* XXX: use a special url_shutdown call to abort parse cleanly */
867     is->abort_request = 1;
868     SDL_WaitThread(is->read_tid, NULL);
869     SDL_WaitThread(is->refresh_tid, NULL);
870
871     /* free all pictures */
872     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
873         vp = &is->pictq[i];
874 #if CONFIG_AVFILTER
875         if (vp->picref) {
876             avfilter_unref_buffer(vp->picref);
877             vp->picref = NULL;
878         }
879 #endif
880         if (vp->bmp) {
881             SDL_FreeYUVOverlay(vp->bmp);
882             vp->bmp = NULL;
883         }
884     }
885     SDL_DestroyMutex(is->pictq_mutex);
886     SDL_DestroyCond(is->pictq_cond);
887     SDL_DestroyMutex(is->subpq_mutex);
888     SDL_DestroyCond(is->subpq_cond);
889 #if !CONFIG_AVFILTER
890     if (is->img_convert_ctx)
891         sws_freeContext(is->img_convert_ctx);
892 #endif
893     av_free(is);
894 }
895
896 static void do_exit(void)
897 {
898     if (cur_stream) {
899         stream_close(cur_stream);
900         cur_stream = NULL;
901     }
902     uninit_opts();
903 #if CONFIG_AVFILTER
904     avfilter_uninit();
905 #endif
906     if (show_status)
907         printf("\n");
908     SDL_Quit();
909     av_log(NULL, AV_LOG_QUIET, "%s", "");
910     exit(0);
911 }
912
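/* (re)create the SDL video surface, sized from the fullscreen resolution, an explicit user size, the filter chain output or the video stream, in that order */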
913 static int video_open(VideoState *is){
914     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
915     int w,h;
916
917     if(is_full_screen) flags |= SDL_FULLSCREEN;
918     else               flags |= SDL_RESIZABLE;
919
920     if (is_full_screen && fs_screen_width) {
921         w = fs_screen_width;
922         h = fs_screen_height;
923     } else if(!is_full_screen && screen_width){
924         w = screen_width;
925         h = screen_height;
926 #if CONFIG_AVFILTER
927     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
928         w = is->out_video_filter->inputs[0]->w;
929         h = is->out_video_filter->inputs[0]->h;
930 #else
931     }else if (is->video_st && is->video_st->codec->width){
932         w = is->video_st->codec->width;
933         h = is->video_st->codec->height;
934 #endif
935     } else {
936         w = 640;
937         h = 480;
938     }
939     if(screen && is->width == screen->w && screen->w == w
940        && is->height== screen->h && screen->h == h)
941         return 0;
942
943 #ifndef __APPLE__
944     screen = SDL_SetVideoMode(w, h, 0, flags);
945 #else
946     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
947     screen = SDL_SetVideoMode(w, h, 24, flags);
948 #endif
949     if (!screen) {
950         fprintf(stderr, "SDL: could not set video mode - exiting\n");
951         do_exit();
952     }
953     if (!window_title)
954         window_title = input_filename;
955     SDL_WM_SetCaption(window_title, window_title);
956
957     is->width = screen->w;
958     is->height = screen->h;
959
960     return 0;
961 }
962
963 /* display the current picture, if any */
964 static void video_display(VideoState *is)
965 {
966     if(!screen)
967         video_open(cur_stream);
968     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
969         video_audio_display(is);
970     else if (is->video_st)
971         video_image_display(is);
972 }
973
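/* periodically push an FF_REFRESH_EVENT into the SDL event queue so that video_refresh() runs in the event loop */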
974 static int refresh_thread(void *opaque)
975 {
976     VideoState *is= opaque;
977     while(!is->abort_request){
978         SDL_Event event;
979         event.type = FF_REFRESH_EVENT;
980         event.user.data1 = opaque;
981         if(!is->refresh){
982             is->refresh=1;
983             SDL_PushEvent(&event);
984         }
985         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
986         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
987     }
988     return 0;
989 }
990
991 /* get the current audio clock value */
992 static double get_audio_clock(VideoState *is)
993 {
994     double pts;
995     int hw_buf_size, bytes_per_sec;
996     pts = is->audio_clock;
997     hw_buf_size = audio_write_get_buf_size(is);
998     bytes_per_sec = 0;
999     if (is->audio_st) {
1000         bytes_per_sec = is->audio_st->codec->sample_rate *
1001             2 * is->audio_st->codec->channels;
1002     }
1003     if (bytes_per_sec)
1004         pts -= (double)hw_buf_size / bytes_per_sec;
1005     return pts;
1006 }
1007
1008 /* get the current video clock value */
1009 static double get_video_clock(VideoState *is)
1010 {
1011     if (is->paused) {
1012         return is->video_current_pts;
1013     } else {
1014         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1015     }
1016 }
1017
1018 /* get the current external clock value */
1019 static double get_external_clock(VideoState *is)
1020 {
1021     int64_t ti;
1022     ti = av_gettime();
1023     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1024 }
1025
1026 /* get the current master clock value */
1027 static double get_master_clock(VideoState *is)
1028 {
1029     double val;
1030
1031     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1032         if (is->video_st)
1033             val = get_video_clock(is);
1034         else
1035             val = get_audio_clock(is);
1036     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1037         if (is->audio_st)
1038             val = get_audio_clock(is);
1039         else
1040             val = get_video_clock(is);
1041     } else {
1042         val = get_external_clock(is);
1043     }
1044     return val;
1045 }
1046
1047 /* seek in the stream */
1048 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1049 {
1050     if (!is->seek_req) {
1051         is->seek_pos = pos;
1052         is->seek_rel = rel;
1053         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1054         if (seek_by_bytes)
1055             is->seek_flags |= AVSEEK_FLAG_BYTE;
1056         is->seek_req = 1;
1057     }
1058 }
1059
1060 /* pause or resume the video */
1061 static void stream_toggle_pause(VideoState *is)
1062 {
1063     if (is->paused) {
1064         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1065         if(is->read_pause_return != AVERROR(ENOSYS)){
1066             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1067         }
1068         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1069     }
1070     is->paused = !is->paused;
1071 }
1072
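/* compute the absolute time at which the given frame should be displayed; when video is not the master clock, the nominal delay is adjusted to follow the master clock */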
1073 static double compute_target_time(double frame_current_pts, VideoState *is)
1074 {
1075     double delay, sync_threshold, diff;
1076
1077     /* compute nominal delay */
1078     delay = frame_current_pts - is->frame_last_pts;
1079     if (delay <= 0 || delay >= 10.0) {
1080         /* if incorrect delay, use previous one */
1081         delay = is->frame_last_delay;
1082     } else {
1083         is->frame_last_delay = delay;
1084     }
1085     is->frame_last_pts = frame_current_pts;
1086
1087     /* update delay to follow master synchronisation source */
1088     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1089          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1090         /* if video is slave, we try to correct big delays by
1091            duplicating or deleting a frame */
1092         diff = get_video_clock(is) - get_master_clock(is);
1093
1094         /* skip or repeat frame. We take into account the
1095            delay to compute the threshold. I still don't know
1096            if it is the best guess */
1097         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1098         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1099             if (diff <= -sync_threshold)
1100                 delay = 0;
1101             else if (diff >= sync_threshold)
1102                 delay = 2 * delay;
1103         }
1104     }
1105     is->frame_timer += delay;
1106
1107     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1108             delay, frame_current_pts, -diff);
1109
1110     return is->frame_timer;
1111 }
1112
1113 /* called to display each frame */
1114 static void video_refresh(void *opaque)
1115 {
1116     VideoState *is = opaque;
1117     VideoPicture *vp;
1118
1119     SubPicture *sp, *sp2;
1120
1121     if (is->video_st) {
1122 retry:
1123         if (is->pictq_size == 0) {
1124             //nothing to do, no picture to display in the queue
1125         } else {
1126             double time= av_gettime()/1000000.0;
1127             double next_target;
1128             /* dequeue the picture */
1129             vp = &is->pictq[is->pictq_rindex];
1130
1131             if(time < vp->target_clock)
1132                 return;
1133             /* update current video pts */
1134             is->video_current_pts = vp->pts;
1135             is->video_current_pts_drift = is->video_current_pts - time;
1136             is->video_current_pos = vp->pos;
1137             if(is->pictq_size > 1){
1138                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1139                 assert(nextvp->target_clock >= vp->target_clock);
1140                 next_target= nextvp->target_clock;
1141             }else{
1142                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1143             }
1144             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1145                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1146                 if(is->pictq_size > 1 || time > next_target + 0.5){
1147                     /* update queue size and signal for next picture */
1148                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1149                         is->pictq_rindex = 0;
1150
1151                     SDL_LockMutex(is->pictq_mutex);
1152                     is->pictq_size--;
1153                     SDL_CondSignal(is->pictq_cond);
1154                     SDL_UnlockMutex(is->pictq_mutex);
1155                     goto retry;
1156                 }
1157             }
1158
1159             if(is->subtitle_st) {
1160                 if (is->subtitle_stream_changed) {
1161                     SDL_LockMutex(is->subpq_mutex);
1162
1163                     while (is->subpq_size) {
1164                         free_subpicture(&is->subpq[is->subpq_rindex]);
1165
1166                         /* update queue size and signal for next picture */
1167                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1168                             is->subpq_rindex = 0;
1169
1170                         is->subpq_size--;
1171                     }
1172                     is->subtitle_stream_changed = 0;
1173
1174                     SDL_CondSignal(is->subpq_cond);
1175                     SDL_UnlockMutex(is->subpq_mutex);
1176                 } else {
1177                     if (is->subpq_size > 0) {
1178                         sp = &is->subpq[is->subpq_rindex];
1179
1180                         if (is->subpq_size > 1)
1181                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1182                         else
1183                             sp2 = NULL;
1184
1185                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1186                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1187                         {
1188                             free_subpicture(sp);
1189
1190                             /* update queue size and signal for next picture */
1191                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1192                                 is->subpq_rindex = 0;
1193
1194                             SDL_LockMutex(is->subpq_mutex);
1195                             is->subpq_size--;
1196                             SDL_CondSignal(is->subpq_cond);
1197                             SDL_UnlockMutex(is->subpq_mutex);
1198                         }
1199                     }
1200                 }
1201             }
1202
1203             /* display picture */
1204             if (!display_disable)
1205                 video_display(is);
1206
1207             /* update queue size and signal for next picture */
1208             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1209                 is->pictq_rindex = 0;
1210
1211             SDL_LockMutex(is->pictq_mutex);
1212             is->pictq_size--;
1213             SDL_CondSignal(is->pictq_cond);
1214             SDL_UnlockMutex(is->pictq_mutex);
1215         }
1216     } else if (is->audio_st) {
1217         /* draw the next audio frame */
1218
1219         /* if there is only an audio stream, display the audio bars (better
1220            than nothing, just to test the implementation) */
1221
1222         /* display picture */
1223         if (!display_disable)
1224             video_display(is);
1225     }
1226     if (show_status) {
1227         static int64_t last_time;
1228         int64_t cur_time;
1229         int aqsize, vqsize, sqsize;
1230         double av_diff;
1231
1232         cur_time = av_gettime();
1233         if (!last_time || (cur_time - last_time) >= 30000) {
1234             aqsize = 0;
1235             vqsize = 0;
1236             sqsize = 0;
1237             if (is->audio_st)
1238                 aqsize = is->audioq.size;
1239             if (is->video_st)
1240                 vqsize = is->videoq.size;
1241             if (is->subtitle_st)
1242                 sqsize = is->subtitleq.size;
1243             av_diff = 0;
1244             if (is->audio_st && is->video_st)
1245                 av_diff = get_audio_clock(is) - get_video_clock(is);
1246             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1247                    get_master_clock(is),
1248                    av_diff,
1249                    FFMAX(is->skip_frames-1, 0),
1250                    aqsize / 1024,
1251                    vqsize / 1024,
1252                    sqsize,
1253                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1254                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1255             fflush(stdout);
1256             last_time = cur_time;
1257         }
1258     }
1259 }
1260
1261 /* allocate a picture (this needs to be done in the main thread to avoid
1262    potential locking problems) */
1263 static void alloc_picture(void *opaque)
1264 {
1265     VideoState *is = opaque;
1266     VideoPicture *vp;
1267
1268     vp = &is->pictq[is->pictq_windex];
1269
1270     if (vp->bmp)
1271         SDL_FreeYUVOverlay(vp->bmp);
1272
1273 #if CONFIG_AVFILTER
1274     if (vp->picref)
1275         avfilter_unref_buffer(vp->picref);
1276     vp->picref = NULL;
1277
1278     vp->width   = is->out_video_filter->inputs[0]->w;
1279     vp->height  = is->out_video_filter->inputs[0]->h;
1280     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1281 #else
1282     vp->width   = is->video_st->codec->width;
1283     vp->height  = is->video_st->codec->height;
1284     vp->pix_fmt = is->video_st->codec->pix_fmt;
1285 #endif
1286
1287     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1288                                    SDL_YV12_OVERLAY,
1289                                    screen);
1290     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1291         /* SDL allocates a buffer smaller than requested if the video
1292          * overlay hardware is unable to support the requested size. */
1293         fprintf(stderr, "Error: the video system does not support an image\n"
1294                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1295                         "to reduce the image size.\n", vp->width, vp->height );
1296         do_exit();
1297     }
1298
1299     SDL_LockMutex(is->pictq_mutex);
1300     vp->allocated = 1;
1301     SDL_CondSignal(is->pictq_cond);
1302     SDL_UnlockMutex(is->pictq_mutex);
1303 }
1304
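/* add a decoded frame to the picture queue, converting it to the SDL overlay format; blocks until a queue slot becomes free */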
1305 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1306 {
1307     VideoPicture *vp;
1308     double frame_delay, pts = pts1;
1309
1310     /* compute the exact PTS for the picture if it is omitted in the stream
1311      * pts1 is the dts of the pkt / pts of the frame */
1312     if (pts != 0) {
1313         /* update video clock with pts, if present */
1314         is->video_clock = pts;
1315     } else {
1316         pts = is->video_clock;
1317     }
1318     /* update video clock for next frame */
1319     frame_delay = av_q2d(is->video_st->codec->time_base);
1320     /* for MPEG2, the frame can be repeated, so we update the
1321        clock accordingly */
1322     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1323     is->video_clock += frame_delay;
1324
1325 #if defined(DEBUG_SYNC) && 0
1326     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1327            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1328 #endif
1329
1330     /* wait until we have space to put a new picture */
1331     SDL_LockMutex(is->pictq_mutex);
1332
1333     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1334         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1335
1336     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1337            !is->videoq.abort_request) {
1338         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1339     }
1340     SDL_UnlockMutex(is->pictq_mutex);
1341
1342     if (is->videoq.abort_request)
1343         return -1;
1344
1345     vp = &is->pictq[is->pictq_windex];
1346
1347     /* alloc or resize hardware picture buffer */
1348     if (!vp->bmp ||
1349 #if CONFIG_AVFILTER
1350         vp->width  != is->out_video_filter->inputs[0]->w ||
1351         vp->height != is->out_video_filter->inputs[0]->h) {
1352 #else
1353         vp->width != is->video_st->codec->width ||
1354         vp->height != is->video_st->codec->height) {
1355 #endif
1356         SDL_Event event;
1357
1358         vp->allocated = 0;
1359
1360         /* the allocation must be done in the main thread to avoid
1361            locking problems */
1362         event.type = FF_ALLOC_EVENT;
1363         event.user.data1 = is;
1364         SDL_PushEvent(&event);
1365
1366         /* wait until the picture is allocated */
1367         SDL_LockMutex(is->pictq_mutex);
1368         while (!vp->allocated && !is->videoq.abort_request) {
1369             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370         }
1371         SDL_UnlockMutex(is->pictq_mutex);
1372
1373         if (is->videoq.abort_request)
1374             return -1;
1375     }
1376
1377     /* if the frame is not skipped, then display it */
1378     if (vp->bmp) {
1379         AVPicture pict;
1380 #if CONFIG_AVFILTER
1381         if(vp->picref)
1382             avfilter_unref_buffer(vp->picref);
1383         vp->picref = src_frame->opaque;
1384 #endif
1385
1386         /* get a pointer on the bitmap */
1387         SDL_LockYUVOverlay (vp->bmp);
1388
1389         memset(&pict,0,sizeof(AVPicture));
1390         pict.data[0] = vp->bmp->pixels[0];
1391         pict.data[1] = vp->bmp->pixels[2];
1392         pict.data[2] = vp->bmp->pixels[1];
1393
1394         pict.linesize[0] = vp->bmp->pitches[0];
1395         pict.linesize[1] = vp->bmp->pitches[2];
1396         pict.linesize[2] = vp->bmp->pitches[1];
1397
1398 #if CONFIG_AVFILTER
1399         //FIXME use direct rendering
1400         av_picture_copy(&pict, (AVPicture *)src_frame,
1401                         vp->pix_fmt, vp->width, vp->height);
1402 #else
1403         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1404         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1405             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1406             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1407         if (is->img_convert_ctx == NULL) {
1408             fprintf(stderr, "Cannot initialize the conversion context\n");
1409             exit(1);
1410         }
1411         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1412                   0, vp->height, pict.data, pict.linesize);
1413 #endif
1414         /* update the bitmap content */
1415         SDL_UnlockYUVOverlay(vp->bmp);
1416
1417         vp->pts = pts;
1418         vp->pos = pos;
1419
1420         /* now we can update the picture count */
1421         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1422             is->pictq_windex = 0;
1423         SDL_LockMutex(is->pictq_mutex);
1424         vp->target_clock= compute_target_time(vp->pts, is);
1425
1426         is->pictq_size++;
1427         SDL_UnlockMutex(is->pictq_mutex);
1428     }
1429     return 0;
1430 }
1431
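/* fetch a packet from the video queue and decode it; returns 1 when a frame is ready for display, 0 when it is skipped or the queue was flushed, < 0 on abort */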
1432 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1433 {
1434     int got_picture, i;
1435
1436     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1437         return -1;
1438
1439     if (pkt->data == flush_pkt.data) {
1440         avcodec_flush_buffers(is->video_st->codec);
1441
1442         SDL_LockMutex(is->pictq_mutex);
1443         //Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1444         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1445             is->pictq[i].target_clock= 0;
1446         }
1447         while (is->pictq_size && !is->videoq.abort_request) {
1448             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1449         }
1450         is->video_current_pos = -1;
1451         SDL_UnlockMutex(is->pictq_mutex);
1452
1453         is->frame_last_pts = AV_NOPTS_VALUE;
1454         is->frame_last_delay = 0;
1455         is->frame_timer = (double)av_gettime() / 1000000.0;
1456         is->skip_frames = 1;
1457         is->skip_frames_index = 0;
1458         return 0;
1459     }
1460
1461     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1462
1463     if (got_picture) {
1464         if (decoder_reorder_pts == -1) {
1465             *pts = frame->best_effort_timestamp;
1466         } else if (decoder_reorder_pts) {
1467             *pts = frame->pkt_pts;
1468         } else {
1469             *pts = frame->pkt_dts;
1470         }
1471
1472         if (*pts == AV_NOPTS_VALUE) {
1473             *pts = 0;
1474         }
1475
1476         is->skip_frames_index += 1;
1477         if(is->skip_frames_index >= is->skip_frames){
1478             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1479             return 1;
1480         }
1481
1482     }
1483     return 0;
1484 }
1485
1486 #if CONFIG_AVFILTER
1487 typedef struct {
1488     VideoState *is;
1489     AVFrame *frame;
1490     int use_dr1;
1491 } FilterPriv;
1492
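/* get_buffer callback: have the decoder write into an AVFilterBufferRef so decoded frames can be passed to the filter graph without copying */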
1493 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1494 {
1495     AVFilterContext *ctx = codec->opaque;
1496     AVFilterBufferRef  *ref;
1497     int perms = AV_PERM_WRITE;
1498     int i, w, h, stride[4];
1499     unsigned edge;
1500     int pixel_size;
1501
1502     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1503
1504     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1505         perms |= AV_PERM_NEG_LINESIZES;
1506
1507     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1511     }
1512     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1513
1514     w = codec->width;
1515     h = codec->height;
1516
1517     if(av_image_check_size(w, h, 0, codec))
1518         return -1;
1519
1520     avcodec_align_dimensions2(codec, &w, &h, stride);
1521     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1522     w += edge << 1;
1523     h += edge << 1;
1524
1525     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1526         return -1;
1527
1528     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1529     ref->video->w = codec->width;
1530     ref->video->h = codec->height;
1531     for(i = 0; i < 4; i ++) {
1532         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1533         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1534
1535         if (ref->data[i]) {
1536             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1537         }
1538         pic->data[i]     = ref->data[i];
1539         pic->linesize[i] = ref->linesize[i];
1540     }
1541     pic->opaque = ref;
1542     pic->age    = INT_MAX;
1543     pic->type   = FF_BUFFER_TYPE_USER;
1544     pic->reordered_opaque = codec->reordered_opaque;
1545     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1546     else           pic->pkt_pts = AV_NOPTS_VALUE;
1547     return 0;
1548 }
1549
1550 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1551 {
1552     memset(pic->data, 0, sizeof(pic->data));
1553     avfilter_unref_buffer(pic->opaque);
1554 }
1555
1556 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1557 {
1558     AVFilterBufferRef *ref = pic->opaque;
1559
1560     if (pic->data[0] == NULL) {
1561         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1562         return codec->get_buffer(codec, pic);
1563     }
1564
1565     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1566         (codec->pix_fmt != ref->format)) {
1567         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1568         return -1;
1569     }
1570
1571     pic->reordered_opaque = codec->reordered_opaque;
1572     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1573     else           pic->pkt_pts = AV_NOPTS_VALUE;
1574     return 0;
1575 }
1576
1577 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1578 {
1579     FilterPriv *priv = ctx->priv;
1580     AVCodecContext *codec;
1581     if(!opaque) return -1;
1582
1583     priv->is = opaque;
1584     codec    = priv->is->video_st->codec;
1585     codec->opaque = ctx;
1586     if((codec->codec->capabilities & CODEC_CAP_DR1)
1587     ) {
1588         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1589         priv->use_dr1 = 1;
1590         codec->get_buffer     = input_get_buffer;
1591         codec->release_buffer = input_release_buffer;
1592         codec->reget_buffer   = input_reget_buffer;
1593         codec->thread_safe_callbacks = 1;
1594     }
1595
1596     priv->frame = avcodec_alloc_frame();
1597
1598     return 0;
1599 }
1600
1601 static void input_uninit(AVFilterContext *ctx)
1602 {
1603     FilterPriv *priv = ctx->priv;
1604     av_free(priv->frame);
1605 }
1606
1607 static int input_request_frame(AVFilterLink *link)
1608 {
1609     FilterPriv *priv = link->src->priv;
1610     AVFilterBufferRef *picref;
1611     int64_t pts = 0;
1612     AVPacket pkt;
1613     int ret;
1614
1615     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1616         av_free_packet(&pkt);
1617     if (ret < 0)
1618         return -1;
1619
1620     if(priv->use_dr1 && priv->frame->opaque) {
1621         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1622     } else {
1623         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1624         av_image_copy(picref->data, picref->linesize,
1625                       priv->frame->data, priv->frame->linesize,
1626                       picref->format, link->w, link->h);
1627     }
1628     av_free_packet(&pkt);
1629
1630     avfilter_copy_frame_props(picref, priv->frame);
1631     picref->pts = pts;
1632
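     /* push the picture downstream as a single slice covering the whole frame */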
1633     avfilter_start_frame(link, picref);
1634     avfilter_draw_slice(link, 0, link->h, 1);
1635     avfilter_end_frame(link);
1636
1637     return 0;
1638 }
1639
1640 static int input_query_formats(AVFilterContext *ctx)
1641 {
1642     FilterPriv *priv = ctx->priv;
1643     enum PixelFormat pix_fmts[] = {
1644         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1645     };
1646
1647     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1648     return 0;
1649 }
1650
1651 static int input_config_props(AVFilterLink *link)
1652 {
1653     FilterPriv *priv  = link->src->priv;
1654     AVCodecContext *c = priv->is->video_st->codec;
1655
1656     link->w = c->width;
1657     link->h = c->height;
1658     link->time_base = priv->is->video_st->time_base;
1659
1660     return 0;
1661 }
1662
1663 static AVFilter input_filter =
1664 {
1665     .name      = "ffplay_input",
1666
1667     .priv_size = sizeof(FilterPriv),
1668
1669     .init      = input_init,
1670     .uninit    = input_uninit,
1671
1672     .query_formats = input_query_formats,
1673
1674     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1675     .outputs   = (AVFilterPad[]) {{ .name = "default",
1676                                     .type = AVMEDIA_TYPE_VIDEO,
1677                                     .request_frame = input_request_frame,
1678                                     .config_props  = input_config_props, },
1679                                   { .name = NULL }},
1680 };
1681
1682 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1683 {
1684     char sws_flags_str[128];
1685     int ret;
1686     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1687     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1688     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1689     graph->scale_sws_opts = av_strdup(sws_flags_str);
1690
1691     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1692                                             NULL, is, graph)) < 0)
1693         goto the_end;
1694     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1695                                             NULL, pix_fmts, graph)) < 0)
1696         goto the_end;
1697
1698     if(vfilters) {
1699         AVFilterInOut *outputs = avfilter_inout_alloc();
1700         AVFilterInOut *inputs  = avfilter_inout_alloc();
1701
1702         outputs->name    = av_strdup("in");
1703         outputs->filter_ctx = filt_src;
1704         outputs->pad_idx = 0;
1705         outputs->next    = NULL;
1706
1707         inputs->name    = av_strdup("out");
1708         inputs->filter_ctx = filt_out;
1709         inputs->pad_idx = 0;
1710         inputs->next    = NULL;
1711
1712         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1713             goto the_end;
1714         av_freep(&vfilters);
1715     } else {
1716         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1717             goto the_end;
1718     }
1719
1720     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1721         goto the_end;
1722
1723     is->out_video_filter = filt_out;
1724 the_end:
1725     return ret;
1726 }
1727
1728 #endif  /* CONFIG_AVFILTER */
1729
1730 static int video_thread(void *arg)
1731 {
1732     VideoState *is = arg;
1733     AVFrame *frame= avcodec_alloc_frame();
1734     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1735     double pts;
1736     int ret;
1737
1738 #if CONFIG_AVFILTER
1739     AVFilterGraph *graph = avfilter_graph_alloc();
1740     AVFilterContext *filt_out = NULL;
1741
1742     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1743         goto the_end;
1744     filt_out = is->out_video_filter;
1745 #endif
1746
1747     for(;;) {
1748 #if !CONFIG_AVFILTER
1749         AVPacket pkt;
1750 #else
1751         AVFilterBufferRef *picref;
1752         AVRational tb = filt_out->inputs[0]->time_base;
1753 #endif
1754         while (is->paused && !is->videoq.abort_request)
1755             SDL_Delay(10);
1756 #if CONFIG_AVFILTER
1757         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1758         if (picref) {
1759             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1760             pts_int = picref->pts;
1761             pos     = picref->pos;
1762             frame->opaque = picref;
1763         }
1764
1765         if (av_cmp_q(tb, is->video_st->time_base)) {
1766             av_unused int64_t pts1 = pts_int;
1767             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1768             av_dlog(NULL, "video_thread(): "
1769                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1770                     tb.num, tb.den, pts1,
1771                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1772         }
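             /* tb is the time base of the filter graph's output link, which can
                differ from the stream time base once filters are inserted, hence
                the av_rescale_q() above */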
1773 #else
1774         ret = get_video_frame(is, frame, &pts_int, &pkt);
1775         pos = pkt.pos;
1776         av_free_packet(&pkt);
1777 #endif
1778
1779         if (ret < 0) goto the_end;
1780
     #if CONFIG_AVFILTER /* picref only exists in the avfilter path */
1781         if (!picref)
1782             continue;
     #endif
1783
1784         pts = pts_int*av_q2d(is->video_st->time_base);
1785
1786         ret = queue_picture(is, frame, pts, pos);
1787
1788         if (ret < 0)
1789             goto the_end;
1790
1791         if (step)
1792             if (cur_stream)
1793                 stream_toggle_pause(cur_stream);
1794     }
1795  the_end:
1796 #if CONFIG_AVFILTER
1797     avfilter_graph_free(&graph);
1798 #endif
1799     av_free(frame);
1800     return 0;
1801 }
1802
1803 static int subtitle_thread(void *arg)
1804 {
1805     VideoState *is = arg;
1806     SubPicture *sp;
1807     AVPacket pkt1, *pkt = &pkt1;
1808     int got_subtitle;
1809     double pts;
1810     int i, j;
1811     int r, g, b, y, u, v, a;
1812
1813     for(;;) {
1814         while (is->paused && !is->subtitleq.abort_request) {
1815             SDL_Delay(10);
1816         }
1817         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1818             break;
1819
1820         if(pkt->data == flush_pkt.data){
1821             avcodec_flush_buffers(is->subtitle_st->codec);
1822             continue;
1823         }
1824         SDL_LockMutex(is->subpq_mutex);
1825         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1826                !is->subtitleq.abort_request) {
1827             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1828         }
1829         SDL_UnlockMutex(is->subpq_mutex);
1830
1831         if (is->subtitleq.abort_request)
1832             goto the_end;
1833
1834         sp = &is->subpq[is->subpq_windex];
1835
1836         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1837            this packet, if any */
1838         pts = 0;
1839         if (pkt->pts != AV_NOPTS_VALUE)
1840             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1841
1842         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1843                                  &got_subtitle, pkt);
1844
1845         if (got_subtitle && sp->sub.format == 0) {
1846             sp->pts = pts;
1847
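                 /* convert the bitmap subtitle palette (pict.data[1]) in place
                    from RGBA to YUVA, so it can be blended onto the YUV overlay
                    at display time */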
1848             for (i = 0; i < sp->sub.num_rects; i++)
1849             {
1850                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1851                 {
1852                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1853                     y = RGB_TO_Y_CCIR(r, g, b);
1854                     u = RGB_TO_U_CCIR(r, g, b, 0);
1855                     v = RGB_TO_V_CCIR(r, g, b, 0);
1856                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1857                 }
1858             }
1859
1860             /* now we can update the picture count */
1861             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1862                 is->subpq_windex = 0;
1863             SDL_LockMutex(is->subpq_mutex);
1864             is->subpq_size++;
1865             SDL_UnlockMutex(is->subpq_mutex);
1866         }
1867         av_free_packet(pkt);
1868     }
1869  the_end:
1870     return 0;
1871 }
1872
1873 /* copy samples for viewing in editor window */
1874 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1875 {
1876     int size, len;
1877
1878     size = samples_size / sizeof(short);
1879     while (size > 0) {
1880         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1881         if (len > size)
1882             len = size;
1883         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1884         samples += len;
1885         is->sample_array_index += len;
1886         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1887             is->sample_array_index = 0;
1888         size -= len;
1889     }
1890 }
1891
1892 /* return the new audio buffer size (samples can be added or deleted
1893    to get better sync if the master clock is the video or an external clock) */
1894 static int synchronize_audio(VideoState *is, short *samples,
1895                              int samples_size1, double pts)
1896 {
1897     int n, samples_size;
1898     double ref_clock;
1899
1900     n = 2 * is->audio_st->codec->channels;
1901     samples_size = samples_size1;
1902
1903     /* if not master, then we try to remove or add samples to correct the clock */
1904     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1905          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1906         double diff, avg_diff;
1907         int wanted_size, min_size, max_size, nb_samples;
1908
1909         ref_clock = get_master_clock(is);
1910         diff = get_audio_clock(is) - ref_clock;
1911
1912         if (diff < AV_NOSYNC_THRESHOLD) {
1913             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1914             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1915                 /* not enough measures to have a correct estimate */
1916                 is->audio_diff_avg_count++;
1917             } else {
1918                 /* estimate the A-V difference */
1919                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
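                     /* for a steady diff the accumulator above converges to
                        diff / (1 - coef), so multiplying by (1 - coef)
                        recovers the average difference */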
1920
1921                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1922                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
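                         /* e.g. with 44.1 kHz stereo S16 (n = 4) and diff = 0.05 s,
                            roughly 0.05 * 44100 * 4 = 8820 extra bytes are requested,
                            before the +-SAMPLE_CORRECTION_PERCENT_MAX clamp below */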
1923                     nb_samples = samples_size / n;
1924
1925                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1926                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1927                     if (wanted_size < min_size)
1928                         wanted_size = min_size;
1929                     else if (wanted_size > max_size)
1930                         wanted_size = max_size;
1931
1932                     /* add or remove samples to correct the sync */
1933                     if (wanted_size < samples_size) {
1934                         /* remove samples */
1935                         samples_size = wanted_size;
1936                     } else if (wanted_size > samples_size) {
1937                         uint8_t *samples_end, *q;
1938                         int nb;
1939
1940                         /* add samples by duplicating the last one */
1941                         nb = (wanted_size - samples_size);
1942                         samples_end = (uint8_t *)samples + samples_size - n;
1943                         q = samples_end + n;
1944                         while (nb > 0) {
1945                             memcpy(q, samples_end, n);
1946                             q += n;
1947                             nb -= n;
1948                         }
1949                         samples_size = wanted_size;
1950                     }
1951                 }
1952 #if 0
1953                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1954                        diff, avg_diff, samples_size - samples_size1,
1955                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1956 #endif
1957             }
1958         } else {
1959             /* the difference is too large: it may be due to initial PTS
1960                errors, so reset the A-V filter */
1961             is->audio_diff_avg_count = 0;
1962             is->audio_diff_cum = 0;
1963         }
1964     }
1965
1966     return samples_size;
1967 }
1968
1969 /* decode one audio frame and return its uncompressed size */
1970 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1971 {
1972     AVPacket *pkt_temp = &is->audio_pkt_temp;
1973     AVPacket *pkt = &is->audio_pkt;
1974     AVCodecContext *dec= is->audio_st->codec;
1975     int n, len1, data_size;
1976     double pts;
1977
1978     for(;;) {
1979         /* NOTE: the audio packet can contain several frames */
1980         while (pkt_temp->size > 0) {
1981             data_size = sizeof(is->audio_buf1);
1982             len1 = avcodec_decode_audio3(dec,
1983                                         (int16_t *)is->audio_buf1, &data_size,
1984                                         pkt_temp);
1985             if (len1 < 0) {
1986                 /* if error, we skip the frame */
1987                 pkt_temp->size = 0;
1988                 break;
1989             }
1990
1991             pkt_temp->data += len1;
1992             pkt_temp->size -= len1;
1993             if (data_size <= 0)
1994                 continue;
1995
1996             if (dec->sample_fmt != is->audio_src_fmt) {
1997                 if (is->reformat_ctx)
1998                     av_audio_convert_free(is->reformat_ctx);
1999                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2000                                                          dec->sample_fmt, 1, NULL, 0);
2001                 if (!is->reformat_ctx) {
2002                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2003                         av_get_sample_fmt_name(dec->sample_fmt),
2004                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2005                         break;
2006                 }
2007                 is->audio_src_fmt= dec->sample_fmt;
2008             }
2009
2010             if (is->reformat_ctx) {
2011                 const void *ibuf[6]= {is->audio_buf1};
2012                 void *obuf[6]= {is->audio_buf2};
2013                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2014                 int ostride[6]= {2};
2015                 int len= data_size/istride[0];
2016                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2017                     printf("av_audio_convert() failed\n");
2018                     break;
2019                 }
2020                 is->audio_buf= is->audio_buf2;
2021                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2022                           remove this legacy cruft */
2023                 data_size= len*2;
2024             }else{
2025                 is->audio_buf= is->audio_buf1;
2026             }
2027
2028             /* if no pts, then compute it */
2029             pts = is->audio_clock;
2030             *pts_ptr = pts;
2031             n = 2 * dec->channels;
2032             is->audio_clock += (double)data_size /
2033                 (double)(n * dec->sample_rate);
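                 /* the increment above is the duration of the decoded data:
                    data_size bytes / (2 bytes per sample * channels * sample_rate) */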
2034 #ifdef DEBUG
2035             {
2036                 static double last_clock;
2037                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2038                        is->audio_clock - last_clock,
2039                        is->audio_clock, pts);
2040                 last_clock = is->audio_clock;
2041             }
2042 #endif
2043             return data_size;
2044         }
2045
2046         /* free the current packet */
2047         if (pkt->data)
2048             av_free_packet(pkt);
2049
2050         if (is->paused || is->audioq.abort_request) {
2051             return -1;
2052         }
2053
2054         /* read next packet */
2055         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2056             return -1;
2057         if(pkt->data == flush_pkt.data){
2058             avcodec_flush_buffers(dec);
2059             continue;
2060         }
2061
2062         pkt_temp->data = pkt->data;
2063         pkt_temp->size = pkt->size;
2064
2065         /* update the audio clock with the packet pts, if present */
2066         if (pkt->pts != AV_NOPTS_VALUE) {
2067             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2068         }
2069     }
2070 }
2071
2072 /* prepare a new audio buffer */
2073 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2074 {
2075     VideoState *is = opaque;
2076     int audio_size, len1;
2077     double pts;
2078
2079     audio_callback_time = av_gettime();
2080
2081     while (len > 0) {
2082         if (is->audio_buf_index >= is->audio_buf_size) {
2083            audio_size = audio_decode_frame(is, &pts);
2084            if (audio_size < 0) {
2085                 /* if error, just output silence */
2086                is->audio_buf = is->audio_buf1;
2087                is->audio_buf_size = 1024;
2088                memset(is->audio_buf, 0, is->audio_buf_size);
2089            } else {
2090                if (is->show_mode != SHOW_MODE_VIDEO)
2091                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2092                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2093                                               pts);
2094                is->audio_buf_size = audio_size;
2095            }
2096            is->audio_buf_index = 0;
2097         }
2098         len1 = is->audio_buf_size - is->audio_buf_index;
2099         if (len1 > len)
2100             len1 = len;
2101         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2102         len -= len1;
2103         stream += len1;
2104         is->audio_buf_index += len1;
2105     }
2106 }
2107
2108 /* open a given stream. Return 0 if OK */
2109 static int stream_component_open(VideoState *is, int stream_index)
2110 {
2111     AVFormatContext *ic = is->ic;
2112     AVCodecContext *avctx;
2113     AVCodec *codec;
2114     SDL_AudioSpec wanted_spec, spec;
2115
2116     if (stream_index < 0 || stream_index >= ic->nb_streams)
2117         return -1;
2118     avctx = ic->streams[stream_index]->codec;
2119
2120     /* prepare audio output */
2121     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2122         if (avctx->channels > 0) {
2123             avctx->request_channels = FFMIN(2, avctx->channels);
2124         } else {
2125             avctx->request_channels = 2;
2126         }
2127     }
2128
2129     codec = avcodec_find_decoder(avctx->codec_id);
2130     if (!codec)
2131         return -1;
2132
2133     avctx->workaround_bugs = workaround_bugs;
2134     avctx->lowres = lowres;
2135     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2136     avctx->idct_algo= idct;
2137     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2138     avctx->skip_frame= skip_frame;
2139     avctx->skip_idct= skip_idct;
2140     avctx->skip_loop_filter= skip_loop_filter;
2141     avctx->error_recognition= error_recognition;
2142     avctx->error_concealment= error_concealment;
2143     avctx->thread_count= thread_count;
2144
2145     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2146
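         /* DR1-capable decoders get CODEC_FLAG_EMU_EDGE so that the
            av_assert0() on this flag in the avfilter input (input_init())
            holds when direct rendering into filter buffers is used */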
2147     if(codec->capabilities & CODEC_CAP_DR1)
2148         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2149
2150     if (avcodec_open(avctx, codec) < 0)
2151         return -1;
2152
2153     /* prepare audio output */
2154     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2155         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2156             fprintf(stderr, "Invalid sample rate or channel count\n");
2157             return -1;
2158         }
2159         wanted_spec.freq = avctx->sample_rate;
2160         wanted_spec.format = AUDIO_S16SYS;
2161         wanted_spec.channels = avctx->channels;
2162         wanted_spec.silence = 0;
2163         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2164         wanted_spec.callback = sdl_audio_callback;
2165         wanted_spec.userdata = is;
2166         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2167             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2168             return -1;
2169         }
2170         is->audio_hw_buf_size = spec.size;
2171         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2172     }
2173
2174     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2175     switch(avctx->codec_type) {
2176     case AVMEDIA_TYPE_AUDIO:
2177         is->audio_stream = stream_index;
2178         is->audio_st = ic->streams[stream_index];
2179         is->audio_buf_size = 0;
2180         is->audio_buf_index = 0;
2181
2182         /* init averaging filter */
2183         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
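             /* exp(log(0.01) / 20) ~= 0.794, so audio_diff_cum is an exponential
                moving average in which the last AUDIO_DIFF_AVG_NB differences
                carry about 99% of the weight */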
2184         is->audio_diff_avg_count = 0;
2185         /* since we do not have a precise enough audio fifo fullness measure,
2186            we correct audio sync only if the error is larger than this threshold */
2187         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
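             /* e.g. at 44.1 kHz this is 2 * 1024 / 44100 ~= 46 ms, i.e. about two
                SDL callback buffers worth of audio */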
2188
2189         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2190         packet_queue_init(&is->audioq);
2191         SDL_PauseAudio(0);
2192         break;
2193     case AVMEDIA_TYPE_VIDEO:
2194         is->video_stream = stream_index;
2195         is->video_st = ic->streams[stream_index];
2196
2197         packet_queue_init(&is->videoq);
2198         is->video_tid = SDL_CreateThread(video_thread, is);
2199         break;
2200     case AVMEDIA_TYPE_SUBTITLE:
2201         is->subtitle_stream = stream_index;
2202         is->subtitle_st = ic->streams[stream_index];
2203         packet_queue_init(&is->subtitleq);
2204
2205         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2206         break;
2207     default:
2208         break;
2209     }
2210     return 0;
2211 }
2212
2213 static void stream_component_close(VideoState *is, int stream_index)
2214 {
2215     AVFormatContext *ic = is->ic;
2216     AVCodecContext *avctx;
2217
2218     if (stream_index < 0 || stream_index >= ic->nb_streams)
2219         return;
2220     avctx = ic->streams[stream_index]->codec;
2221
2222     switch(avctx->codec_type) {
2223     case AVMEDIA_TYPE_AUDIO:
2224         packet_queue_abort(&is->audioq);
2225
2226         SDL_CloseAudio();
2227
2228         packet_queue_end(&is->audioq);
2229         if (is->reformat_ctx)
2230             av_audio_convert_free(is->reformat_ctx);
2231         is->reformat_ctx = NULL;
2232         break;
2233     case AVMEDIA_TYPE_VIDEO:
2234         packet_queue_abort(&is->videoq);
2235
2236         /* note: we also signal this mutex to make sure we deblock the
2237            video thread in all cases */
2238         SDL_LockMutex(is->pictq_mutex);
2239         SDL_CondSignal(is->pictq_cond);
2240         SDL_UnlockMutex(is->pictq_mutex);
2241
2242         SDL_WaitThread(is->video_tid, NULL);
2243
2244         packet_queue_end(&is->videoq);
2245         break;
2246     case AVMEDIA_TYPE_SUBTITLE:
2247         packet_queue_abort(&is->subtitleq);
2248
2249         /* note: we also signal this mutex to make sure we deblock the
2250            subtitle thread in all cases */
2251         SDL_LockMutex(is->subpq_mutex);
2252         is->subtitle_stream_changed = 1;
2253
2254         SDL_CondSignal(is->subpq_cond);
2255         SDL_UnlockMutex(is->subpq_mutex);
2256
2257         SDL_WaitThread(is->subtitle_tid, NULL);
2258
2259         packet_queue_end(&is->subtitleq);
2260         break;
2261     default:
2262         break;
2263     }
2264
2265     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2266     avcodec_close(avctx);
2267     switch(avctx->codec_type) {
2268     case AVMEDIA_TYPE_AUDIO:
2269         is->audio_st = NULL;
2270         is->audio_stream = -1;
2271         break;
2272     case AVMEDIA_TYPE_VIDEO:
2273         is->video_st = NULL;
2274         is->video_stream = -1;
2275         break;
2276     case AVMEDIA_TYPE_SUBTITLE:
2277         is->subtitle_st = NULL;
2278         is->subtitle_stream = -1;
2279         break;
2280     default:
2281         break;
2282     }
2283 }
2284
2285 /* since we have only one decoding thread, we can use a global
2286    variable instead of a thread local variable */
2287 static VideoState *global_video_state;
2288
2289 static int decode_interrupt_cb(void)
2290 {
2291     return (global_video_state && global_video_state->abort_request);
2292 }
2293
2294 /* this thread gets the stream from the disk or the network */
2295 static int read_thread(void *arg)
2296 {
2297     VideoState *is = arg;
2298     AVFormatContext *ic = NULL;
2299     int err, i, ret;
2300     int st_index[AVMEDIA_TYPE_NB];
2301     AVPacket pkt1, *pkt = &pkt1;
2302     int eof=0;
2303     int pkt_in_play_range = 0;
2304     AVDictionaryEntry *t;
2305
2306     memset(st_index, -1, sizeof(st_index));
2307     is->video_stream = -1;
2308     is->audio_stream = -1;
2309     is->subtitle_stream = -1;
2310
2311     global_video_state = is;
2312     avio_set_interrupt_cb(decode_interrupt_cb);
2313
2314     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2315     if (err < 0) {
2316         print_error(is->filename, err);
2317         ret = -1;
2318         goto fail;
2319     }
2320     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2321         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2322         ret = AVERROR_OPTION_NOT_FOUND;
2323         goto fail;
2324     }
2325     is->ic = ic;
2326
2327     if(genpts)
2328         ic->flags |= AVFMT_FLAG_GENPTS;
2329
2330     err = av_find_stream_info(ic);
2331     if (err < 0) {
2332         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2333         ret = -1;
2334         goto fail;
2335     }
2336     if(ic->pb)
2337         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2338
2339     if(seek_by_bytes<0)
2340         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
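         /* i.e. default to byte seeking for formats with discontinuous timestamps
            (AVFMT_TS_DISCONT, e.g. MPEG-TS), where timestamp seeking is unreliable */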
2341
2342     /* if a start position was requested, seek to it */
2343     if (start_time != AV_NOPTS_VALUE) {
2344         int64_t timestamp;
2345
2346         timestamp = start_time;
2347         /* add the stream start time */
2348         if (ic->start_time != AV_NOPTS_VALUE)
2349             timestamp += ic->start_time;
2350         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2351         if (ret < 0) {
2352             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2353                     is->filename, (double)timestamp / AV_TIME_BASE);
2354         }
2355     }
2356
2357     for (i = 0; i < ic->nb_streams; i++)
2358         ic->streams[i]->discard = AVDISCARD_ALL;
2359     if (!video_disable)
2360         st_index[AVMEDIA_TYPE_VIDEO] =
2361             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2362                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2363     if (!audio_disable)
2364         st_index[AVMEDIA_TYPE_AUDIO] =
2365             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2366                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2367                                 st_index[AVMEDIA_TYPE_VIDEO],
2368                                 NULL, 0);
2369     if (!video_disable)
2370         st_index[AVMEDIA_TYPE_SUBTITLE] =
2371             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2372                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2373                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2374                                  st_index[AVMEDIA_TYPE_AUDIO] :
2375                                  st_index[AVMEDIA_TYPE_VIDEO]),
2376                                 NULL, 0);
2377     if (show_status) {
2378         av_dump_format(ic, 0, is->filename, 0);
2379     }
2380
2381     is->show_mode = show_mode;
2382
2383     /* open the streams */
2384     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2385         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2386     }
2387
2388     ret=-1;
2389     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2390         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2391     }
2392     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2393     if (is->show_mode == SHOW_MODE_NONE)
2394         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2395
2396     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2397         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2398     }
2399
2400     if (is->video_stream < 0 && is->audio_stream < 0) {
2401         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2402         ret = -1;
2403         goto fail;
2404     }
2405
2406     for(;;) {
2407         if (is->abort_request)
2408             break;
2409         if (is->paused != is->last_paused) {
2410             is->last_paused = is->paused;
2411             if (is->paused)
2412                 is->read_pause_return= av_read_pause(ic);
2413             else
2414                 av_read_play(ic);
2415         }
2416 #if CONFIG_RTSP_DEMUXER
2417         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2418             /* wait 10 ms to avoid trying to get another packet */
2419             /* XXX: horrible */
2420             SDL_Delay(10);
2421             continue;
2422         }
2423 #endif
2424         if (is->seek_req) {
2425             int64_t seek_target= is->seek_pos;
2426             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2427             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2428 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2429 //      of the seek_pos/seek_rel variables
2430
2431             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2432             if (ret < 0) {
2433                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2434             }else{
2435                 if (is->audio_stream >= 0) {
2436                     packet_queue_flush(&is->audioq);
2437                     packet_queue_put(&is->audioq, &flush_pkt);
2438                 }
2439                 if (is->subtitle_stream >= 0) {
2440                     packet_queue_flush(&is->subtitleq);
2441                     packet_queue_put(&is->subtitleq, &flush_pkt);
2442                 }
2443                 if (is->video_stream >= 0) {
2444                     packet_queue_flush(&is->videoq);
2445                     packet_queue_put(&is->videoq, &flush_pkt);
2446                 }
2447             }
2448             is->seek_req = 0;
2449             eof= 0;
2450         }
2451
2452         /* if the queues are full, no need to read more */
2453         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2454             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2455                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2456                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2457             /* wait 10 ms */
2458             SDL_Delay(10);
2459             continue;
2460         }
2461         if(eof) {
2462             if(is->video_stream >= 0){
2463                 av_init_packet(pkt);
2464                 pkt->data=NULL;
2465                 pkt->size=0;
2466                 pkt->stream_index= is->video_stream;
2467                 packet_queue_put(&is->videoq, pkt);
2468             }
2469             SDL_Delay(10);
2470             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2471                 if(loop!=1 && (!loop || --loop)){
2472                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2473                 }else if(autoexit){
2474                     ret=AVERROR_EOF;
2475                     goto fail;
2476                 }
2477             }
2478             eof=0;
2479             continue;
2480         }
2481         ret = av_read_frame(ic, pkt);
2482         if (ret < 0) {
2483             if (ret == AVERROR_EOF || url_feof(ic->pb))
2484                 eof=1;
2485             if (ic->pb && ic->pb->error)
2486                 break;
2487             SDL_Delay(100); /* wait for user event */
2488             continue;
2489         }
2490         /* check if the packet is in the play range specified by the user, then queue it, otherwise discard it */
2491         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2492                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2493                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2494                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2495                 <= ((double)duration/1000000);
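             /* the test above converts pkt->pts to seconds with the stream time base,
                makes it relative to -ss, and compares it against -t; start_time and
                duration are in AV_TIME_BASE (microsecond) units, hence the /1000000 */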
2496         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2497             packet_queue_put(&is->audioq, pkt);
2498         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2499             packet_queue_put(&is->videoq, pkt);
2500         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2501             packet_queue_put(&is->subtitleq, pkt);
2502         } else {
2503             av_free_packet(pkt);
2504         }
2505     }
2506     /* wait until the end */
2507     while (!is->abort_request) {
2508         SDL_Delay(100);
2509     }
2510
2511     ret = 0;
2512  fail:
2513     /* disable interrupting */
2514     global_video_state = NULL;
2515
2516     /* close each stream */
2517     if (is->audio_stream >= 0)
2518         stream_component_close(is, is->audio_stream);
2519     if (is->video_stream >= 0)
2520         stream_component_close(is, is->video_stream);
2521     if (is->subtitle_stream >= 0)
2522         stream_component_close(is, is->subtitle_stream);
2523     if (is->ic) {
2524         av_close_input_file(is->ic);
2525         is->ic = NULL; /* safety */
2526     }
2527     avio_set_interrupt_cb(NULL);
2528
2529     if (ret != 0) {
2530         SDL_Event event;
2531
2532         event.type = FF_QUIT_EVENT;
2533         event.user.data1 = is;
2534         SDL_PushEvent(&event);
2535     }
2536     return 0;
2537 }
2538
2539 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2540 {
2541     VideoState *is;
2542
2543     is = av_mallocz(sizeof(VideoState));
2544     if (!is)
2545         return NULL;
2546     av_strlcpy(is->filename, filename, sizeof(is->filename));
2547     is->iformat = iformat;
2548     is->ytop = 0;
2549     is->xleft = 0;
2550
2551     /* start video display */
2552     is->pictq_mutex = SDL_CreateMutex();
2553     is->pictq_cond = SDL_CreateCond();
2554
2555     is->subpq_mutex = SDL_CreateMutex();
2556     is->subpq_cond = SDL_CreateCond();
2557
2558     is->av_sync_type = av_sync_type;
2559     is->read_tid = SDL_CreateThread(read_thread, is);
2560     if (!is->read_tid) {
2561         av_free(is);
2562         return NULL;
2563     }
2564     return is;
2565 }
2566
2567 static void stream_cycle_channel(VideoState *is, int codec_type)
2568 {
2569     AVFormatContext *ic = is->ic;
2570     int start_index, stream_index;
2571     AVStream *st;
2572
2573     if (codec_type == AVMEDIA_TYPE_VIDEO)
2574         start_index = is->video_stream;
2575     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2576         start_index = is->audio_stream;
2577     else
2578         start_index = is->subtitle_stream;
2579     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2580         return;
2581     stream_index = start_index;
2582     for(;;) {
2583         if (++stream_index >= is->ic->nb_streams)
2584         {
2585             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2586             {
2587                 stream_index = -1;
2588                 goto the_end;
2589             } else
2590                 stream_index = 0;
2591         }
2592         if (stream_index == start_index)
2593             return;
2594         st = ic->streams[stream_index];
2595         if (st->codec->codec_type == codec_type) {
2596             /* check that parameters are OK */
2597             switch(codec_type) {
2598             case AVMEDIA_TYPE_AUDIO:
2599                 if (st->codec->sample_rate != 0 &&
2600                     st->codec->channels != 0)
2601                     goto the_end;
2602                 break;
2603             case AVMEDIA_TYPE_VIDEO:
2604             case AVMEDIA_TYPE_SUBTITLE:
2605                 goto the_end;
2606             default:
2607                 break;
2608             }
2609         }
2610     }
2611  the_end:
2612     stream_component_close(is, start_index);
2613     stream_component_open(is, stream_index);
2614 }
2615
2616
2617 static void toggle_full_screen(void)
2618 {
2619     is_full_screen = !is_full_screen;
2620     video_open(cur_stream);
2621 }
2622
2623 static void toggle_pause(void)
2624 {
2625     if (cur_stream)
2626         stream_toggle_pause(cur_stream);
2627     step = 0;
2628 }
2629
2630 static void step_to_next_frame(void)
2631 {
2632     if (cur_stream) {
2633         /* if the stream is paused, unpause it, then step */
2634         if (cur_stream->paused)
2635             stream_toggle_pause(cur_stream);
2636     }
2637     step = 1;
2638 }
2639
2640 static void toggle_audio_display(void)
2641 {
2642     if (cur_stream) {
2643         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2644         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2645         fill_rectangle(screen,
2646                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2647                     bgcolor);
2648         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2649     }
2650 }
2651
2652 /* handle an event sent by the GUI */
2653 static void event_loop(void)
2654 {
2655     SDL_Event event;
2656     double incr, pos, frac;
2657
2658     for(;;) {
2659         double x;
2660         SDL_WaitEvent(&event);
2661         switch(event.type) {
2662         case SDL_KEYDOWN:
2663             if (exit_on_keydown) {
2664                 do_exit();
2665                 break;
2666             }
2667             switch(event.key.keysym.sym) {
2668             case SDLK_ESCAPE:
2669             case SDLK_q:
2670                 do_exit();
2671                 break;
2672             case SDLK_f:
2673                 toggle_full_screen();
2674                 break;
2675             case SDLK_p:
2676             case SDLK_SPACE:
2677                 toggle_pause();
2678                 break;
2679             case SDLK_s: //S: Step to next frame
2680                 step_to_next_frame();
2681                 break;
2682             case SDLK_a:
2683                 if (cur_stream)
2684                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2685                 break;
2686             case SDLK_v:
2687                 if (cur_stream)
2688                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2689                 break;
2690             case SDLK_t:
2691                 if (cur_stream)
2692                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2693                 break;
2694             case SDLK_w:
2695                 toggle_audio_display();
2696                 break;
2697             case SDLK_LEFT:
2698                 incr = -10.0;
2699                 goto do_seek;
2700             case SDLK_RIGHT:
2701                 incr = 10.0;
2702                 goto do_seek;
2703             case SDLK_UP:
2704                 incr = 60.0;
2705                 goto do_seek;
2706             case SDLK_DOWN:
2707                 incr = -60.0;
2708             do_seek:
2709                 if (cur_stream) {
2710                     if (seek_by_bytes) {
2711                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2712                             pos= cur_stream->video_current_pos;
2713                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2714                             pos= cur_stream->audio_pkt.pos;
2715                         }else
2716                             pos = avio_tell(cur_stream->ic->pb);
2717                         if (cur_stream->ic->bit_rate)
2718                             incr *= cur_stream->ic->bit_rate / 8.0;
2719                         else
2720                             incr *= 180000.0;
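                             /* incr is now in bytes: bit_rate/8 converts bits/s
                                to bytes/s, and the 180000 bytes/s fallback is
                                roughly 1.44 Mbit/s */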
2721                         pos += incr;
2722                         stream_seek(cur_stream, pos, incr, 1);
2723                     } else {
2724                         pos = get_master_clock(cur_stream);
2725                         pos += incr;
2726                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2727                     }
2728                 }
2729                 break;
2730             default:
2731                 break;
2732             }
2733             break;
2734         case SDL_MOUSEBUTTONDOWN:
2735             if (exit_on_mousedown) {
2736                 do_exit();
2737                 break;
2738             }
2739         case SDL_MOUSEMOTION:
2740             if(event.type ==SDL_MOUSEBUTTONDOWN){
2741                 x= event.button.x;
2742             }else{
2743                 if(event.motion.state != SDL_PRESSED)
2744                     break;
2745                 x= event.motion.x;
2746             }
2747             if (cur_stream) {
2748                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2749                     uint64_t size=  avio_size(cur_stream->ic->pb);
2750                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2751                 }else{
2752                     int64_t ts;
2753                     int ns, hh, mm, ss;
2754                     int tns, thh, tmm, tss;
2755                     tns = cur_stream->ic->duration/1000000LL;
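                         /* ic->duration and ts below are in AV_TIME_BASE
                            (microsecond) units */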
2756                     thh = tns/3600;
2757                     tmm = (tns%3600)/60;
2758                     tss = (tns%60);
2759                     frac = x/cur_stream->width;
2760                     ns = frac*tns;
2761                     hh = ns/3600;
2762                     mm = (ns%3600)/60;
2763                     ss = (ns%60);
2764                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2765                             hh, mm, ss, thh, tmm, tss);
2766                     ts = frac*cur_stream->ic->duration;
2767                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2768                         ts += cur_stream->ic->start_time;
2769                     stream_seek(cur_stream, ts, 0, 0);
2770                 }
2771             }
2772             break;
2773         case SDL_VIDEORESIZE:
2774             if (cur_stream) {
2775                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2776                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2777                 screen_width = cur_stream->width = event.resize.w;
2778                 screen_height= cur_stream->height= event.resize.h;
2779             }
2780             break;
2781         case SDL_QUIT:
2782         case FF_QUIT_EVENT:
2783             do_exit();
2784             break;
2785         case FF_ALLOC_EVENT:
2786             video_open(event.user.data1);
2787             alloc_picture(event.user.data1);
2788             break;
2789         case FF_REFRESH_EVENT:
2790             video_refresh(event.user.data1);
2791             cur_stream->refresh=0;
2792             break;
2793         default:
2794             break;
2795         }
2796     }
2797 }
2798
2799 static int opt_frame_size(const char *opt, const char *arg)
2800 {
2801     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2802         fprintf(stderr, "Incorrect frame size\n");
2803         return AVERROR(EINVAL);
2804     }
2805     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2806         fprintf(stderr, "Frame size must be a multiple of 2\n");
2807         return AVERROR(EINVAL);
2808     }
2809     return 0;
2810 }
2811
2812 static int opt_width(const char *opt, const char *arg)
2813 {
2814     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2815     return 0;
2816 }
2817
2818 static int opt_height(const char *opt, const char *arg)
2819 {
2820     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2821     return 0;
2822 }
2823
2824 static int opt_format(const char *opt, const char *arg)
2825 {
2826     file_iformat = av_find_input_format(arg);
2827     if (!file_iformat) {
2828         fprintf(stderr, "Unknown input format: %s\n", arg);
2829         return AVERROR(EINVAL);
2830     }
2831     return 0;
2832 }
2833
2834 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2835 {
2836     frame_pix_fmt = av_get_pix_fmt(arg);
2837     return 0;
2838 }
2839
2840 static int opt_sync(const char *opt, const char *arg)
2841 {
2842     if (!strcmp(arg, "audio"))
2843         av_sync_type = AV_SYNC_AUDIO_MASTER;
2844     else if (!strcmp(arg, "video"))
2845         av_sync_type = AV_SYNC_VIDEO_MASTER;
2846     else if (!strcmp(arg, "ext"))
2847         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2848     else {
2849         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2850         exit(1);
2851     }
2852     return 0;
2853 }
2854
2855 static int opt_seek(const char *opt, const char *arg)
2856 {
2857     start_time = parse_time_or_die(opt, arg, 1);
2858     return 0;
2859 }
2860
2861 static int opt_duration(const char *opt, const char *arg)
2862 {
2863     duration = parse_time_or_die(opt, arg, 1);
2864     return 0;
2865 }
2866
2867 static int opt_thread_count(const char *opt, const char *arg)
2868 {
2869     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2870 #if !HAVE_THREADS
2871     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2872 #endif
2873     return 0;
2874 }
2875
2876 static int opt_show_mode(const char *opt, const char *arg)
2877 {
2878     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2879                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2880                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2881                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2882     return 0;
2883 }
2884
2885 static int opt_input_file(const char *opt, const char *filename)
2886 {
2887     if (input_filename) {
2888         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2889                 filename, input_filename);
2890         exit(1);
2891     }
2892     if (!strcmp(filename, "-"))
2893         filename = "pipe:";
2894     input_filename = filename;
2895     return 0;
2896 }
2897
2898 static const OptionDef options[] = {
2899 #include "cmdutils_common_opts.h"
2900     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2901     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2902     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2903     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2904     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2905     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2906     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2907     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2908     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2909     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2910     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2911     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2912     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2913     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2914     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2915     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2916     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2917     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2918     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2919     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2920     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2921     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2922     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2923     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2924     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2925     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2926     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2927     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2928     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2929     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2930     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2931     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2932     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2933     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2934     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2935 #if CONFIG_AVFILTER
2936     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2937 #endif
2938     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2939     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2940     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2941     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2942     { NULL, },
2943 };
2944
2945 static void show_usage(void)
2946 {
2947     printf("Simple media player\n");
2948     printf("usage: ffplay [options] input_file\n");
2949     printf("\n");
2950 }
2951
2952 static int opt_help(const char *opt, const char *arg)
2953 {
2954     av_log_set_callback(log_callback_help);
2955     show_usage();
2956     show_help_options(options, "Main options:\n",
2957                       OPT_EXPERT, 0);
2958     show_help_options(options, "\nAdvanced options:\n",
2959                       OPT_EXPERT, OPT_EXPERT);
2960     printf("\n");
2961     av_opt_show2(avcodec_opts[0], NULL,
2962                  AV_OPT_FLAG_DECODING_PARAM, 0);
2963     printf("\n");
2964     av_opt_show2(avformat_opts, NULL,
2965                  AV_OPT_FLAG_DECODING_PARAM, 0);
2966 #if !CONFIG_AVFILTER
2967     printf("\n");
2968     av_opt_show2(sws_opts, NULL,
2969                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2970 #endif
2971     printf("\nWhile playing:\n"
2972            "q, ESC              quit\n"
2973            "f                   toggle full screen\n"
2974            "p, SPC              pause\n"
2975            "a                   cycle audio channel\n"
2976            "v                   cycle video channel\n"
2977            "t                   cycle subtitle channel\n"
2978            "w                   show audio waves\n"
2979            "s                   activate frame-step mode\n"
2980            "left/right          seek backward/forward 10 seconds\n"
2981            "down/up             seek backward/forward 1 minute\n"
2982            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2983            );
2984     return 0;
2985 }
2986
2987 /* Called from the main */
2988 int main(int argc, char **argv)
2989 {
2990     int flags;
2991
2992     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2993
2994     /* register all codecs, demux and protocols */
2995     avcodec_register_all();
2996 #if CONFIG_AVDEVICE
2997     avdevice_register_all();
2998 #endif
2999 #if CONFIG_AVFILTER
3000     avfilter_register_all();
3001 #endif
3002     av_register_all();
3003
3004     init_opts();
3005
3006     show_banner();
3007
3008     parse_options(argc, argv, options, opt_input_file);
3009
3010     if (!input_filename) {
3011         show_usage();
3012         fprintf(stderr, "An input file must be specified\n");
3013         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3014         exit(1);
3015     }
3016
3017     if (display_disable) {
3018         video_disable = 1;
3019     }
3020     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3021     if (audio_disable)
3022         flags &= ~SDL_INIT_AUDIO;
3023 #if !defined(__MINGW32__) && !defined(__APPLE__)
3024     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3025 #endif
3026     if (SDL_Init (flags)) {
3027         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3028         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3029         exit(1);
3030     }
3031
3032     if (!display_disable) {
3033 #if HAVE_SDL_VIDEO_SIZE
3034         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3035         fs_screen_width = vi->current_w;
3036         fs_screen_height = vi->current_h;
3037 #endif
3038     }
3039
3040     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3041     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3042     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3043
3044     av_init_packet(&flush_pkt);
3045     flush_pkt.data= "FLUSH";
3046
3047     cur_stream = stream_open(input_filename, file_iformat);
3048
3049     event_loop();
3050
3051     /* event_loop() never returns, so this point is never reached */
3052
3053     return 0;
3054 }