1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
71 /* no AV correction is done if the error is too big */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. We reserve more space for A/V sync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static int opt_help(const char *opt, const char *arg);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int step = 0;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
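/* append a packet to the queue; the packet is duplicated first unless it is
   the special flush packet */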
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
282 {
283     AVPacketList *pkt1;
284
285     /* duplicate the packet */
286     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
287         return -1;
288
289     pkt1 = av_malloc(sizeof(AVPacketList));
290     if (!pkt1)
291         return -1;
292     pkt1->pkt = *pkt;
293     pkt1->next = NULL;
294
295
296     SDL_LockMutex(q->mutex);
297
298     if (!q->last_pkt)
299
300         q->first_pkt = pkt1;
301     else
302         q->last_pkt->next = pkt1;
303     q->last_pkt = pkt1;
304     q->nb_packets++;
305     q->size += pkt1->pkt.size + sizeof(*pkt1);
306     /* XXX: should duplicate packet data in DV case */
307     SDL_CondSignal(q->cond);
308
309     SDL_UnlockMutex(q->mutex);
310     return 0;
311 }
312
313 /* packet queue handling */
314 static void packet_queue_init(PacketQueue *q)
315 {
316     memset(q, 0, sizeof(PacketQueue));
317     q->mutex = SDL_CreateMutex();
318     q->cond = SDL_CreateCond();
319     packet_queue_put(q, &flush_pkt);
320 }
321
322 static void packet_queue_flush(PacketQueue *q)
323 {
324     AVPacketList *pkt, *pkt1;
325
326     SDL_LockMutex(q->mutex);
327     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
328         pkt1 = pkt->next;
329         av_free_packet(&pkt->pkt);
330         av_freep(&pkt);
331     }
332     q->last_pkt = NULL;
333     q->first_pkt = NULL;
334     q->nb_packets = 0;
335     q->size = 0;
336     SDL_UnlockMutex(q->mutex);
337 }
338
339 static void packet_queue_end(PacketQueue *q)
340 {
341     packet_queue_flush(q);
342     SDL_DestroyMutex(q->mutex);
343     SDL_DestroyCond(q->cond);
344 }
345
346 static void packet_queue_abort(PacketQueue *q)
347 {
348     SDL_LockMutex(q->mutex);
349
350     q->abort_request = 1;
351
352     SDL_CondSignal(q->cond);
353
354     SDL_UnlockMutex(q->mutex);
355 }
356
357 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
358 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
359 {
360     AVPacketList *pkt1;
361     int ret;
362
363     SDL_LockMutex(q->mutex);
364
365     for(;;) {
366         if (q->abort_request) {
367             ret = -1;
368             break;
369         }
370
371         pkt1 = q->first_pkt;
372         if (pkt1) {
373             q->first_pkt = pkt1->next;
374             if (!q->first_pkt)
375                 q->last_pkt = NULL;
376             q->nb_packets--;
377             q->size -= pkt1->pkt.size + sizeof(*pkt1);
378             *pkt = pkt1->pkt;
379             av_free(pkt1);
380             ret = 1;
381             break;
382         } else if (!block) {
383             ret = 0;
384             break;
385         } else {
386             SDL_CondWait(q->cond, q->mutex);
387         }
388     }
389     SDL_UnlockMutex(q->mutex);
390     return ret;
391 }
392
393 static inline void fill_rectangle(SDL_Surface *screen,
394                                   int x, int y, int w, int h, int color)
395 {
396     SDL_Rect rect;
397     rect.x = x;
398     rect.y = y;
399     rect.w = w;
400     rect.h = h;
401     SDL_FillRect(screen, &rect, color);
402 }
403
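/* pixel helpers for blend_subrect(): ALPHA_BLEND mixes a new sample into an
   existing 8-bit plane value, RGBA_IN/YUVA_IN unpack a 32-bit RGBA word or a
   palette entry, and YUVA_OUT packs one back */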
404 #define ALPHA_BLEND(a, oldp, newp, s)\
405 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
406
407 #define RGBA_IN(r, g, b, a, s)\
408 {\
409     unsigned int v = ((const uint32_t *)(s))[0];\
410     a = (v >> 24) & 0xff;\
411     r = (v >> 16) & 0xff;\
412     g = (v >> 8) & 0xff;\
413     b = v & 0xff;\
414 }
415
416 #define YUVA_IN(y, u, v, a, s, pal)\
417 {\
418     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
419     a = (val >> 24) & 0xff;\
420     y = (val >> 16) & 0xff;\
421     u = (val >> 8) & 0xff;\
422     v = val & 0xff;\
423 }
424
425 #define YUVA_OUT(d, y, u, v, a)\
426 {\
427     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
428 }
429
430
431 #define BPP 1
432
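/* alpha-blend a palettized subtitle rectangle onto the YUV420 destination
   picture, handling luma line by line and chroma two lines at a time */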
433 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
434 {
435     int wrap, wrap3, width2, skip2;
436     int y, u, v, a, u1, v1, a1, w, h;
437     uint8_t *lum, *cb, *cr;
438     const uint8_t *p;
439     const uint32_t *pal;
440     int dstx, dsty, dstw, dsth;
441
442     dstw = av_clip(rect->w, 0, imgw);
443     dsth = av_clip(rect->h, 0, imgh);
444     dstx = av_clip(rect->x, 0, imgw - dstw);
445     dsty = av_clip(rect->y, 0, imgh - dsth);
446     lum = dst->data[0] + dsty * dst->linesize[0];
447     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
448     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
449
450     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
451     skip2 = dstx >> 1;
452     wrap = dst->linesize[0];
453     wrap3 = rect->pict.linesize[0];
454     p = rect->pict.data[0];
455     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
456
457     if (dsty & 1) {
458         lum += dstx;
459         cb += skip2;
460         cr += skip2;
461
462         if (dstx & 1) {
463             YUVA_IN(y, u, v, a, p, pal);
464             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
466             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
467             cb++;
468             cr++;
469             lum++;
470             p += BPP;
471         }
472         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
473             YUVA_IN(y, u, v, a, p, pal);
474             u1 = u;
475             v1 = v;
476             a1 = a;
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478
479             YUVA_IN(y, u, v, a, p + BPP, pal);
480             u1 += u;
481             v1 += v;
482             a1 += a;
483             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
484             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
485             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
486             cb++;
487             cr++;
488             p += 2 * BPP;
489             lum += 2;
490         }
491         if (w) {
492             YUVA_IN(y, u, v, a, p, pal);
493             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
494             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
495             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
496             p++;
497             lum++;
498         }
499         p += wrap3 - dstw * BPP;
500         lum += wrap - dstw - dstx;
501         cb += dst->linesize[1] - width2 - skip2;
502         cr += dst->linesize[2] - width2 - skip2;
503     }
504     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             u1 = u;
512             v1 = v;
513             a1 = a;
514             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515             p += wrap3;
516             lum += wrap;
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 += u;
519             v1 += v;
520             a1 += a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524             cb++;
525             cr++;
526             p += -wrap3 + BPP;
527             lum += -wrap + 1;
528         }
529         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 = u;
532             v1 = v;
533             a1 = a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535
536             YUVA_IN(y, u, v, a, p + BPP, pal);
537             u1 += u;
538             v1 += v;
539             a1 += a;
540             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541             p += wrap3;
542             lum += wrap;
543
544             YUVA_IN(y, u, v, a, p, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549
550             YUVA_IN(y, u, v, a, p + BPP, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555
556             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558
559             cb++;
560             cr++;
561             p += -wrap3 + 2 * BPP;
562             lum += -wrap + 2;
563         }
564         if (w) {
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 = u;
567             v1 = v;
568             a1 = a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             p += wrap3;
571             lum += wrap;
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 += u;
574             v1 += v;
575             a1 += a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579             cb++;
580             cr++;
581             p += -wrap3 + BPP;
582             lum += -wrap + 1;
583         }
584         p += wrap3 + (wrap3 - dstw * BPP);
585         lum += wrap + (wrap - dstw - dstx);
586         cb += dst->linesize[1] - width2 - skip2;
587         cr += dst->linesize[2] - width2 - skip2;
588     }
589     /* handle odd height */
590     if (h) {
591         lum += dstx;
592         cb += skip2;
593         cr += skip2;
594
595         if (dstx & 1) {
596             YUVA_IN(y, u, v, a, p, pal);
597             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600             cb++;
601             cr++;
602             lum++;
603             p += BPP;
604         }
605         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606             YUVA_IN(y, u, v, a, p, pal);
607             u1 = u;
608             v1 = v;
609             a1 = a;
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611
612             YUVA_IN(y, u, v, a, p + BPP, pal);
613             u1 += u;
614             v1 += v;
615             a1 += a;
616             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
618             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
619             cb++;
620             cr++;
621             p += 2 * BPP;
622             lum += 2;
623         }
624         if (w) {
625             YUVA_IN(y, u, v, a, p, pal);
626             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
629         }
630     }
631 }
632
633 static void free_subpicture(SubPicture *sp)
634 {
635     avsubtitle_free(&sp->sub);
636 }
637
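/* display the current picture from the queue, blending any pending subtitle
   and letterboxing it to the window while preserving the aspect ratio */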
638 static void video_image_display(VideoState *is)
639 {
640     VideoPicture *vp;
641     SubPicture *sp;
642     AVPicture pict;
643     float aspect_ratio;
644     int width, height, x, y;
645     SDL_Rect rect;
646     int i;
647
648     vp = &is->pictq[is->pictq_rindex];
649     if (vp->bmp) {
650 #if CONFIG_AVFILTER
651          if (vp->picref->video->sample_aspect_ratio.num == 0)
652              aspect_ratio = 0;
653          else
654              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
655 #else
656
657         /* XXX: use variable in the frame */
658         if (is->video_st->sample_aspect_ratio.num)
659             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
660         else if (is->video_st->codec->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
662         else
663             aspect_ratio = 0;
664 #endif
665         if (aspect_ratio <= 0.0)
666             aspect_ratio = 1.0;
667         aspect_ratio *= (float)vp->width / (float)vp->height;
668
669         if (is->subtitle_st) {
670             if (is->subpq_size > 0) {
671                 sp = &is->subpq[is->subpq_rindex];
672
673                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
674                     SDL_LockYUVOverlay (vp->bmp);
675
676                     pict.data[0] = vp->bmp->pixels[0];
677                     pict.data[1] = vp->bmp->pixels[2];
678                     pict.data[2] = vp->bmp->pixels[1];
679
680                     pict.linesize[0] = vp->bmp->pitches[0];
681                     pict.linesize[1] = vp->bmp->pitches[2];
682                     pict.linesize[2] = vp->bmp->pitches[1];
683
684                     for (i = 0; i < sp->sub.num_rects; i++)
685                         blend_subrect(&pict, sp->sub.rects[i],
686                                       vp->bmp->w, vp->bmp->h);
687
688                     SDL_UnlockYUVOverlay (vp->bmp);
689                 }
690             }
691         }
692
693
694         /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
695         height = is->height;
696         width = ((int)rint(height * aspect_ratio)) & ~1;
697         if (width > is->width) {
698             width = is->width;
699             height = ((int)rint(width / aspect_ratio)) & ~1;
700         }
701         x = (is->width - width) / 2;
702         y = (is->height - height) / 2;
703         is->no_background = 0;
704         rect.x = is->xleft + x;
705         rect.y = is->ytop  + y;
706         rect.w = FFMAX(width,  1);
707         rect.h = FFMAX(height, 1);
708         SDL_DisplayYUVOverlay(vp->bmp, &rect);
709     }
710 }
711
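/* positive modulo, e.g. compute_mod(-3, 10) == 7 */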
712 static inline int compute_mod(int a, int b)
713 {
714     return a < 0 ? a%b + b : a%b;
715 }
716
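/* draw the audio visualization: either the waveform of the most recently
   played samples or their RDFT spectrum, depending on the show mode */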
717 static void video_audio_display(VideoState *s)
718 {
719     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
720     int ch, channels, h, h2, bgcolor, fgcolor;
721     int64_t time_diff; /* microseconds elapsed since the last audio callback */
722     int rdft_bits, nb_freq;
723
724     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
725         ;
726     nb_freq= 1<<(rdft_bits-1);
727
728     /* compute display index: center on the currently output samples */
729     channels = s->audio_st->codec->channels;
730     nb_display_channels = channels;
731     if (!s->paused) {
732         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
733         n = 2 * channels;
734         delay = s->audio_write_buf_size;
735         delay /= n;
736
737         /* to be more precise, we take into account the time spent since
738            the last buffer computation */
739         if (audio_callback_time) {
740             time_diff = av_gettime() - audio_callback_time;
741             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
742         }
743
744         delay += 2*data_used;
745         if (delay < data_used)
746             delay = data_used;
747
748         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
749         if (s->show_mode == SHOW_MODE_WAVES) {
750             h= INT_MIN;
751             for(i=0; i<1000; i+=channels){
752                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
753                 int a= s->sample_array[idx];
754                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
755                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
756                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
757                 int score= a-d;
758                 if(h<score && (b^c)<0){
759                     h= score;
760                     i_start= idx;
761                 }
762             }
763         }
764
765         s->last_i_start = i_start;
766     } else {
767         i_start = s->last_i_start;
768     }
769
770     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
771     if (s->show_mode == SHOW_MODE_WAVES) {
772         fill_rectangle(screen,
773                        s->xleft, s->ytop, s->width, s->height,
774                        bgcolor);
775
776         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
777
778         /* total height for one channel */
779         h = s->height / nb_display_channels;
780         /* graph height / 2 */
781         h2 = (h * 9) / 20;
782         for(ch = 0;ch < nb_display_channels; ch++) {
783             i = i_start + ch;
784             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
785             for(x = 0; x < s->width; x++) {
786                 y = (s->sample_array[i] * h2) >> 15;
787                 if (y < 0) {
788                     y = -y;
789                     ys = y1 - y;
790                 } else {
791                     ys = y1;
792                 }
793                 fill_rectangle(screen,
794                                s->xleft + x, ys, 1, y,
795                                fgcolor);
796                 i += channels;
797                 if (i >= SAMPLE_ARRAY_SIZE)
798                     i -= SAMPLE_ARRAY_SIZE;
799             }
800         }
801
802         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
803
804         for(ch = 1;ch < nb_display_channels; ch++) {
805             y = s->ytop + ch * h;
806             fill_rectangle(screen,
807                            s->xleft, y, s->width, 1,
808                            fgcolor);
809         }
810         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
811     }else{
812         nb_display_channels= FFMIN(nb_display_channels, 2);
813         if(rdft_bits != s->rdft_bits){
814             av_rdft_end(s->rdft);
815             av_free(s->rdft_data);
816             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
817             s->rdft_bits= rdft_bits;
818             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
819         }
820         {
821             FFTSample *data[2];
822             for(ch = 0;ch < nb_display_channels; ch++) {
823                 data[ch] = s->rdft_data + 2*nb_freq*ch;
824                 i = i_start + ch;
825                 for(x = 0; x < 2*nb_freq; x++) {
826                     double w= (x-nb_freq)*(1.0/nb_freq);
827                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
828                     i += channels;
829                     if (i >= SAMPLE_ARRAY_SIZE)
830                         i -= SAMPLE_ARRAY_SIZE;
831                 }
832                 av_rdft_calc(s->rdft, data[ch]);
833             }
834             // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough
835             for(y=0; y<s->height; y++){
836                 double w= 1/sqrt(nb_freq);
837                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
838                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
839                        + data[1][2*y+1]*data[1][2*y+1])) : a;
840                 a= FFMIN(a,255);
841                 b= FFMIN(b,255);
842                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
843
844                 fill_rectangle(screen,
845                             s->xpos, s->height-y, 1, 1,
846                             fgcolor);
847             }
848         }
849         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
850         s->xpos++;
851         if(s->xpos >= s->width)
852             s->xpos= s->xleft;
853     }
854 }
855
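/* ask the read and refresh threads to stop, wait for them and free
   everything owned by the VideoState */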
856 static void stream_close(VideoState *is)
857 {
858     VideoPicture *vp;
859     int i;
860     /* XXX: use a special url_shutdown call to abort parse cleanly */
861     is->abort_request = 1;
862     SDL_WaitThread(is->read_tid, NULL);
863     SDL_WaitThread(is->refresh_tid, NULL);
864
865     /* free all pictures */
866     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
867         vp = &is->pictq[i];
868 #if CONFIG_AVFILTER
869         if (vp->picref) {
870             avfilter_unref_buffer(vp->picref);
871             vp->picref = NULL;
872         }
873 #endif
874         if (vp->bmp) {
875             SDL_FreeYUVOverlay(vp->bmp);
876             vp->bmp = NULL;
877         }
878     }
879     SDL_DestroyMutex(is->pictq_mutex);
880     SDL_DestroyCond(is->pictq_cond);
881     SDL_DestroyMutex(is->subpq_mutex);
882     SDL_DestroyCond(is->subpq_cond);
883 #if !CONFIG_AVFILTER
884     if (is->img_convert_ctx)
885         sws_freeContext(is->img_convert_ctx);
886 #endif
887     av_free(is);
888 }
889
890 static void do_exit(void)
891 {
892     if (cur_stream) {
893         stream_close(cur_stream);
894         cur_stream = NULL;
895     }
896     uninit_opts();
897 #if CONFIG_AVFILTER
898     avfilter_uninit();
899 #endif
900     if (show_status)
901         printf("\n");
902     SDL_Quit();
903     av_log(NULL, AV_LOG_QUIET, "%s", "");
904     exit(0);
905 }
906
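/* (re)open the SDL video surface, picking its size from the fullscreen mode,
   an explicitly requested size, the filter/codec dimensions or a 640x480 default */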
907 static int video_open(VideoState *is){
908     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
909     int w,h;
910
911     if(is_full_screen) flags |= SDL_FULLSCREEN;
912     else               flags |= SDL_RESIZABLE;
913
914     if (is_full_screen && fs_screen_width) {
915         w = fs_screen_width;
916         h = fs_screen_height;
917     } else if(!is_full_screen && screen_width){
918         w = screen_width;
919         h = screen_height;
920 #if CONFIG_AVFILTER
921     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
922         w = is->out_video_filter->inputs[0]->w;
923         h = is->out_video_filter->inputs[0]->h;
924 #else
925     }else if (is->video_st && is->video_st->codec->width){
926         w = is->video_st->codec->width;
927         h = is->video_st->codec->height;
928 #endif
929     } else {
930         w = 640;
931         h = 480;
932     }
933     if(screen && is->width == screen->w && screen->w == w
934        && is->height== screen->h && screen->h == h)
935         return 0;
936
937 #ifndef __APPLE__
938     screen = SDL_SetVideoMode(w, h, 0, flags);
939 #else
940     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
941     screen = SDL_SetVideoMode(w, h, 24, flags);
942 #endif
943     if (!screen) {
944         fprintf(stderr, "SDL: could not set video mode - exiting\n");
945         do_exit();
946     }
947     if (!window_title)
948         window_title = input_filename;
949     SDL_WM_SetCaption(window_title, window_title);
950
951     is->width = screen->w;
952     is->height = screen->h;
953
954     return 0;
955 }
956
957 /* display the current picture, if any */
958 static void video_display(VideoState *is)
959 {
960     if(!screen)
961         video_open(cur_stream);
962     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
963         video_audio_display(is);
964     else if (is->video_st)
965         video_image_display(is);
966 }
967
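/* periodically push an FF_REFRESH_EVENT so the main loop refreshes the display */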
968 static int refresh_thread(void *opaque)
969 {
970     VideoState *is= opaque;
971     while(!is->abort_request){
972         SDL_Event event;
973         event.type = FF_REFRESH_EVENT;
974         event.user.data1 = opaque;
975         if(!is->refresh){
976             is->refresh=1;
977             SDL_PushEvent(&event);
978         }
979         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
980         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
981     }
982     return 0;
983 }
984
985 /* get the current audio clock value */
986 static double get_audio_clock(VideoState *is)
987 {
988     if (is->paused) {
989         return is->audio_current_pts;
990     } else {
991         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
992     }
993 }
994
995 /* get the current video clock value */
996 static double get_video_clock(VideoState *is)
997 {
998     if (is->paused) {
999         return is->video_current_pts;
1000     } else {
1001         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1002     }
1003 }
1004
1005 /* get the current external clock value */
1006 static double get_external_clock(VideoState *is)
1007 {
1008     int64_t ti;
1009     ti = av_gettime();
1010     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1011 }
1012
1013 /* get the current master clock value */
1014 static double get_master_clock(VideoState *is)
1015 {
1016     double val;
1017
1018     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1019         if (is->video_st)
1020             val = get_video_clock(is);
1021         else
1022             val = get_audio_clock(is);
1023     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1024         if (is->audio_st)
1025             val = get_audio_clock(is);
1026         else
1027             val = get_video_clock(is);
1028     } else {
1029         val = get_external_clock(is);
1030     }
1031     return val;
1032 }
1033
1034 /* seek in the stream */
1035 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1036 {
1037     if (!is->seek_req) {
1038         is->seek_pos = pos;
1039         is->seek_rel = rel;
1040         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1041         if (seek_by_bytes)
1042             is->seek_flags |= AVSEEK_FLAG_BYTE;
1043         is->seek_req = 1;
1044     }
1045 }
1046
1047 /* pause or resume the video */
1048 static void stream_toggle_pause(VideoState *is)
1049 {
1050     if (is->paused) {
1051         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1052         if(is->read_pause_return != AVERROR(ENOSYS)){
1053             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1054         }
1055         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1056     }
1057     is->paused = !is->paused;
1058 }
1059
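/* compute the time at which the given frame should be displayed: start from
   the nominal inter-frame delay and, when video is not the master clock,
   stretch or shrink it so the video drifts toward the master */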
1060 static double compute_target_time(double frame_current_pts, VideoState *is)
1061 {
1062     double delay, sync_threshold, diff;
1063
1064     /* compute nominal delay */
1065     delay = frame_current_pts - is->frame_last_pts;
1066     if (delay <= 0 || delay >= 10.0) {
1067         /* if incorrect delay, use previous one */
1068         delay = is->frame_last_delay;
1069     } else {
1070         is->frame_last_delay = delay;
1071     }
1072     is->frame_last_pts = frame_current_pts;
1073
1074     /* update delay to follow master synchronisation source */
1075     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1076          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1077         /* if video is slave, we try to correct big delays by
1078            duplicating or deleting a frame */
1079         diff = get_video_clock(is) - get_master_clock(is);
1080
1081         /* skip or repeat frame. We take into account the
1082            delay to compute the threshold. I still don't know
1083            if it is the best guess */
1084         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1085         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1086             if (diff <= -sync_threshold)
1087                 delay = 0;
1088             else if (diff >= sync_threshold)
1089                 delay = 2 * delay;
1090         }
1091     }
1092     is->frame_timer += delay;
1093
1094     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1095             delay, frame_current_pts, -diff);
1096
1097     return is->frame_timer;
1098 }
1099
1100 /* called to display each frame */
1101 static void video_refresh(void *opaque)
1102 {
1103     VideoState *is = opaque;
1104     VideoPicture *vp;
1105
1106     SubPicture *sp, *sp2;
1107
1108     if (is->video_st) {
1109 retry:
1110         if (is->pictq_size == 0) {
1111             // nothing to do, no picture to display in the queue
1112         } else {
1113             double time= av_gettime()/1000000.0;
1114             double next_target;
1115             /* dequeue the picture */
1116             vp = &is->pictq[is->pictq_rindex];
1117
1118             if(time < vp->target_clock)
1119                 return;
1120             /* update current video pts */
1121             is->video_current_pts = vp->pts;
1122             is->video_current_pts_drift = is->video_current_pts - time;
1123             is->video_current_pos = vp->pos;
1124             if(is->pictq_size > 1){
1125                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1126                 assert(nextvp->target_clock >= vp->target_clock);
1127                 next_target= nextvp->target_clock;
1128             }else{
1129                 next_target= vp->target_clock + vp->duration;
1130             }
1131             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1132                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1133                 if(is->pictq_size > 1 || time > next_target + 0.5){
1134                     /* update queue size and signal for next picture */
1135                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1136                         is->pictq_rindex = 0;
1137
1138                     SDL_LockMutex(is->pictq_mutex);
1139                     is->pictq_size--;
1140                     SDL_CondSignal(is->pictq_cond);
1141                     SDL_UnlockMutex(is->pictq_mutex);
1142                     goto retry;
1143                 }
1144             }
1145
1146             if(is->subtitle_st) {
1147                 if (is->subtitle_stream_changed) {
1148                     SDL_LockMutex(is->subpq_mutex);
1149
1150                     while (is->subpq_size) {
1151                         free_subpicture(&is->subpq[is->subpq_rindex]);
1152
1153                         /* update queue size and signal for next picture */
1154                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                             is->subpq_rindex = 0;
1156
1157                         is->subpq_size--;
1158                     }
1159                     is->subtitle_stream_changed = 0;
1160
1161                     SDL_CondSignal(is->subpq_cond);
1162                     SDL_UnlockMutex(is->subpq_mutex);
1163                 } else {
1164                     if (is->subpq_size > 0) {
1165                         sp = &is->subpq[is->subpq_rindex];
1166
1167                         if (is->subpq_size > 1)
1168                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1169                         else
1170                             sp2 = NULL;
1171
1172                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1173                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1174                         {
1175                             free_subpicture(sp);
1176
1177                             /* update queue size and signal for next picture */
1178                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1179                                 is->subpq_rindex = 0;
1180
1181                             SDL_LockMutex(is->subpq_mutex);
1182                             is->subpq_size--;
1183                             SDL_CondSignal(is->subpq_cond);
1184                             SDL_UnlockMutex(is->subpq_mutex);
1185                         }
1186                     }
1187                 }
1188             }
1189
1190             /* display picture */
1191             if (!display_disable)
1192                 video_display(is);
1193
1194             /* update queue size and signal for next picture */
1195             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1196                 is->pictq_rindex = 0;
1197
1198             SDL_LockMutex(is->pictq_mutex);
1199             is->pictq_size--;
1200             SDL_CondSignal(is->pictq_cond);
1201             SDL_UnlockMutex(is->pictq_mutex);
1202         }
1203     } else if (is->audio_st) {
1204         /* draw the next audio frame */
1205
1206         /* if there is only an audio stream, display the audio bars (better
1207            than nothing, just to test the implementation) */
1208
1209         /* display picture */
1210         if (!display_disable)
1211             video_display(is);
1212     }
1213     if (show_status) {
1214         static int64_t last_time;
1215         int64_t cur_time;
1216         int aqsize, vqsize, sqsize;
1217         double av_diff;
1218
1219         cur_time = av_gettime();
1220         if (!last_time || (cur_time - last_time) >= 30000) {
1221             aqsize = 0;
1222             vqsize = 0;
1223             sqsize = 0;
1224             if (is->audio_st)
1225                 aqsize = is->audioq.size;
1226             if (is->video_st)
1227                 vqsize = is->videoq.size;
1228             if (is->subtitle_st)
1229                 sqsize = is->subtitleq.size;
1230             av_diff = 0;
1231             if (is->audio_st && is->video_st)
1232                 av_diff = get_audio_clock(is) - get_video_clock(is);
1233             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1234                    get_master_clock(is),
1235                    av_diff,
1236                    FFMAX(is->skip_frames-1, 0),
1237                    aqsize / 1024,
1238                    vqsize / 1024,
1239                    sqsize,
1240                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1241                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1242             fflush(stdout);
1243             last_time = cur_time;
1244         }
1245     }
1246 }
1247
1248 /* allocate a picture (this needs to be done in the main thread to avoid
1249    potential locking problems) */
1250 static void alloc_picture(void *opaque)
1251 {
1252     VideoState *is = opaque;
1253     VideoPicture *vp;
1254
1255     vp = &is->pictq[is->pictq_windex];
1256
1257     if (vp->bmp)
1258         SDL_FreeYUVOverlay(vp->bmp);
1259
1260 #if CONFIG_AVFILTER
1261     if (vp->picref)
1262         avfilter_unref_buffer(vp->picref);
1263     vp->picref = NULL;
1264
1265     vp->width   = is->out_video_filter->inputs[0]->w;
1266     vp->height  = is->out_video_filter->inputs[0]->h;
1267     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1268 #else
1269     vp->width   = is->video_st->codec->width;
1270     vp->height  = is->video_st->codec->height;
1271     vp->pix_fmt = is->video_st->codec->pix_fmt;
1272 #endif
1273
1274     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1275                                    SDL_YV12_OVERLAY,
1276                                    screen);
1277     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1278         /* SDL allocates a buffer smaller than requested if the video
1279          * overlay hardware is unable to support the requested size. */
1280         fprintf(stderr, "Error: the video system does not support an image\n"
1281                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1282                         "to reduce the image size.\n", vp->width, vp->height );
1283         do_exit();
1284     }
1285
1286     SDL_LockMutex(is->pictq_mutex);
1287     vp->allocated = 1;
1288     SDL_CondSignal(is->pictq_cond);
1289     SDL_UnlockMutex(is->pictq_mutex);
1290 }
1291
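/* add a decoded frame to the picture queue: update the video clock, wait for
   a free slot, (re)allocate the SDL overlay in the main thread if needed and
   copy/convert the frame into it */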
1292 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1293 {
1294     VideoPicture *vp;
1295     double frame_delay, pts = pts1;
1296
1297     /* compute the exact PTS for the picture if it is omitted in the stream
1298      * pts1 is the dts of the pkt / pts of the frame */
1299     if (pts != 0) {
1300         /* update video clock with pts, if present */
1301         is->video_clock = pts;
1302     } else {
1303         pts = is->video_clock;
1304     }
1305     /* update video clock for next frame */
1306     frame_delay = av_q2d(is->video_st->codec->time_base);
1307     /* for MPEG2, the frame can be repeated, so we update the
1308        clock accordingly */
1309     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1310     is->video_clock += frame_delay;
1311
1312 #if defined(DEBUG_SYNC) && 0
1313     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1314            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1315 #endif
1316
1317     /* wait until we have space to put a new picture */
1318     SDL_LockMutex(is->pictq_mutex);
1319
1320     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1321         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1322
1323     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1324            !is->videoq.abort_request) {
1325         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1326     }
1327     SDL_UnlockMutex(is->pictq_mutex);
1328
1329     if (is->videoq.abort_request)
1330         return -1;
1331
1332     vp = &is->pictq[is->pictq_windex];
1333
1334     vp->duration = frame_delay;
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated = 0;
1348
1349         /* the allocation must be done in the main thread to avoid
1350            locking problems */
1351         event.type = FF_ALLOC_EVENT;
1352         event.user.data1 = is;
1353         SDL_PushEvent(&event);
1354
1355         /* wait until the picture is allocated */
1356         SDL_LockMutex(is->pictq_mutex);
1357         while (!vp->allocated && !is->videoq.abort_request) {
1358             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1359         }
1360         SDL_UnlockMutex(is->pictq_mutex);
1361
1362         if (is->videoq.abort_request)
1363             return -1;
1364     }
1365
1366     /* if the frame is not skipped, then display it */
1367     if (vp->bmp) {
1368         AVPicture pict;
1369 #if CONFIG_AVFILTER
1370         if(vp->picref)
1371             avfilter_unref_buffer(vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer to the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         memset(&pict,0,sizeof(AVPicture));
1379         pict.data[0] = vp->bmp->pixels[0];
1380         pict.data[1] = vp->bmp->pixels[2];
1381         pict.data[2] = vp->bmp->pixels[1];
1382
1383         pict.linesize[0] = vp->bmp->pitches[0];
1384         pict.linesize[1] = vp->bmp->pitches[2];
1385         pict.linesize[2] = vp->bmp->pitches[1];
1386
1387 #if CONFIG_AVFILTER
1388         //FIXME use direct rendering
1389         av_picture_copy(&pict, (AVPicture *)src_frame,
1390                         vp->pix_fmt, vp->width, vp->height);
1391 #else
1392         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1393         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1394             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1395             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1396         if (is->img_convert_ctx == NULL) {
1397             fprintf(stderr, "Cannot initialize the conversion context\n");
1398             exit(1);
1399         }
1400         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1401                   0, vp->height, pict.data, pict.linesize);
1402 #endif
1403         /* update the bitmap content */
1404         SDL_UnlockYUVOverlay(vp->bmp);
1405
1406         vp->pts = pts;
1407         vp->pos = pos;
1408
1409         /* now we can update the picture count */
1410         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1411             is->pictq_windex = 0;
1412         SDL_LockMutex(is->pictq_mutex);
1413         vp->target_clock= compute_target_time(vp->pts, is);
1414
1415         is->pictq_size++;
1416         SDL_UnlockMutex(is->pictq_mutex);
1417     }
1418     return 0;
1419 }
1420
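/* fetch the next packet from the video queue and decode it; handles the flush
   packet and frame dropping. Returns 1 when a frame should be queued,
   0 otherwise, and a negative value on abort */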
1421 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1422 {
1423     int got_picture, i;
1424
1425     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1426         return -1;
1427
1428     if (pkt->data == flush_pkt.data) {
1429         avcodec_flush_buffers(is->video_st->codec);
1430
1431         SDL_LockMutex(is->pictq_mutex);
1432         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1433         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1434             is->pictq[i].target_clock= 0;
1435         }
1436         while (is->pictq_size && !is->videoq.abort_request) {
1437             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1438         }
1439         is->video_current_pos = -1;
1440         SDL_UnlockMutex(is->pictq_mutex);
1441
1442         is->frame_last_pts = AV_NOPTS_VALUE;
1443         is->frame_last_delay = 0;
1444         is->frame_timer = (double)av_gettime() / 1000000.0;
1445         is->skip_frames = 1;
1446         is->skip_frames_index = 0;
1447         return 0;
1448     }
1449
1450     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1451
1452     if (got_picture) {
1453         if (decoder_reorder_pts == -1) {
1454             *pts = frame->best_effort_timestamp;
1455         } else if (decoder_reorder_pts) {
1456             *pts = frame->pkt_pts;
1457         } else {
1458             *pts = frame->pkt_dts;
1459         }
1460
1461         if (*pts == AV_NOPTS_VALUE) {
1462             *pts = 0;
1463         }
1464
1465         is->skip_frames_index += 1;
1466         if(is->skip_frames_index >= is->skip_frames){
1467             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1468             return 1;
1469         }
1470
1471     }
1472     return 0;
1473 }
1474
1475 #if CONFIG_AVFILTER
1476 typedef struct {
1477     VideoState *is;
1478     AVFrame *frame;
1479     int use_dr1;
1480 } FilterPriv;
1481
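/* get_buffer() callback installed for DR1-capable decoders: hand the decoder
   a buffer obtained from the filter graph so decoded frames can be passed
   downstream without a copy */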
1482 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1483 {
1484     AVFilterContext *ctx = codec->opaque;
1485     AVFilterBufferRef  *ref;
1486     int perms = AV_PERM_WRITE;
1487     int i, w, h, stride[4];
1488     unsigned edge;
1489     int pixel_size;
1490
1491     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1492
1493     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1494         perms |= AV_PERM_NEG_LINESIZES;
1495
1496     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1497         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1498         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1499         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1500     }
1501     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1502
1503     w = codec->width;
1504     h = codec->height;
1505
1506     if(av_image_check_size(w, h, 0, codec))
1507         return -1;
1508
1509     avcodec_align_dimensions2(codec, &w, &h, stride);
1510     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1511     w += edge << 1;
1512     h += edge << 1;
1513
1514     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1515         return -1;
1516
1517     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1518     ref->video->w = codec->width;
1519     ref->video->h = codec->height;
1520     for(i = 0; i < 4; i ++) {
1521         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1522         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1523
1524         if (ref->data[i]) {
1525             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1526         }
1527         pic->data[i]     = ref->data[i];
1528         pic->linesize[i] = ref->linesize[i];
1529     }
1530     pic->opaque = ref;
1531     pic->age    = INT_MAX;
1532     pic->type   = FF_BUFFER_TYPE_USER;
1533     pic->reordered_opaque = codec->reordered_opaque;
1534     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1535     else           pic->pkt_pts = AV_NOPTS_VALUE;
1536     return 0;
1537 }
1538
1539 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1540 {
1541     memset(pic->data, 0, sizeof(pic->data));
1542     avfilter_unref_buffer(pic->opaque);
1543 }
1544
1545 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1546 {
1547     AVFilterBufferRef *ref = pic->opaque;
1548
1549     if (pic->data[0] == NULL) {
1550         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1551         return codec->get_buffer(codec, pic);
1552     }
1553
1554     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1555         (codec->pix_fmt != ref->format)) {
1556         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1557         return -1;
1558     }
1559
1560     pic->reordered_opaque = codec->reordered_opaque;
1561     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1562     else           pic->pkt_pts = AV_NOPTS_VALUE;
1563     return 0;
1564 }
1565
1566 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1567 {
1568     FilterPriv *priv = ctx->priv;
1569     AVCodecContext *codec;
1570     if(!opaque) return -1;
1571
1572     priv->is = opaque;
1573     codec    = priv->is->video_st->codec;
1574     codec->opaque = ctx;
1575     if((codec->codec->capabilities & CODEC_CAP_DR1)
1576     ) {
1577         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1578         priv->use_dr1 = 1;
1579         codec->get_buffer     = input_get_buffer;
1580         codec->release_buffer = input_release_buffer;
1581         codec->reget_buffer   = input_reget_buffer;
1582         codec->thread_safe_callbacks = 1;
1583     }
1584
1585     priv->frame = avcodec_alloc_frame();
1586
1587     return 0;
1588 }
1589
1590 static void input_uninit(AVFilterContext *ctx)
1591 {
1592     FilterPriv *priv = ctx->priv;
1593     av_free(priv->frame);
1594 }
1595
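/* request_frame() callback of the source filter: decode packets until a video
   frame is produced and push it into the filter chain */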
1596 static int input_request_frame(AVFilterLink *link)
1597 {
1598     FilterPriv *priv = link->src->priv;
1599     AVFilterBufferRef *picref;
1600     int64_t pts = 0;
1601     AVPacket pkt;
1602     int ret;
1603
1604     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1605         av_free_packet(&pkt);
1606     if (ret < 0)
1607         return -1;
1608
1609     if(priv->use_dr1 && priv->frame->opaque) {
1610         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1611     } else {
1612         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1613         av_image_copy(picref->data, picref->linesize,
1614                       priv->frame->data, priv->frame->linesize,
1615                       picref->format, link->w, link->h);
1616     }
1617     av_free_packet(&pkt);
1618
1619     avfilter_copy_frame_props(picref, priv->frame);
1620     picref->pts = pts;
1621
1622     avfilter_start_frame(link, picref);
1623     avfilter_draw_slice(link, 0, link->h, 1);
1624     avfilter_end_frame(link);
1625
1626     return 0;
1627 }
1628
1629 static int input_query_formats(AVFilterContext *ctx)
1630 {
1631     FilterPriv *priv = ctx->priv;
1632     enum PixelFormat pix_fmts[] = {
1633         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1634     };
1635
1636     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1637     return 0;
1638 }
1639
1640 static int input_config_props(AVFilterLink *link)
1641 {
1642     FilterPriv *priv  = link->src->priv;
1643     AVStream *s = priv->is->video_st;
1644
1645     link->w = s->codec->width;
1646     link->h = s->codec->height;
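         /* prefer the sample aspect ratio stored in the stream and fall back to
            the one reported by the codec when the stream does not provide one */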
1647     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1648         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
1649     link->time_base = s->time_base;
1650
1651     return 0;
1652 }
1653
1654 static AVFilter input_filter =
1655 {
1656     .name      = "ffplay_input",
1657
1658     .priv_size = sizeof(FilterPriv),
1659
1660     .init      = input_init,
1661     .uninit    = input_uninit,
1662
1663     .query_formats = input_query_formats,
1664
1665     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1666     .outputs   = (AVFilterPad[]) {{ .name = "default",
1667                                     .type = AVMEDIA_TYPE_VIDEO,
1668                                     .request_frame = input_request_frame,
1669                                     .config_props  = input_config_props, },
1670                                   { .name = NULL }},
1671 };
1672
1673 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1674 {
1675     char sws_flags_str[128];
1676     int ret;
1677     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1678     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1679     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1680     graph->scale_sws_opts = av_strdup(sws_flags_str);
1681
1682     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1683                                             NULL, is, graph)) < 0)
1684         return ret;
1685     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1686                                             NULL, pix_fmts, graph)) < 0)
1687         return ret;
1688
1689     if(vfilters) {
1690         AVFilterInOut *outputs = avfilter_inout_alloc();
1691         AVFilterInOut *inputs  = avfilter_inout_alloc();
1692
1693         outputs->name    = av_strdup("in");
1694         outputs->filter_ctx = filt_src;
1695         outputs->pad_idx = 0;
1696         outputs->next    = NULL;
1697
1698         inputs->name    = av_strdup("out");
1699         inputs->filter_ctx = filt_out;
1700         inputs->pad_idx = 0;
1701         inputs->next    = NULL;
1702
1703         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1704             return ret;
1705     } else {
1706         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1707             return ret;
1708     }
1709
1710     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1711         return ret;
1712
1713     is->out_video_filter = filt_out;
1714
1715     return ret;
1716 }
1717
1718 #endif  /* CONFIG_AVFILTER */
1719
1720 static int video_thread(void *arg)
1721 {
1722     VideoState *is = arg;
1723     AVFrame *frame= avcodec_alloc_frame();
1724     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1725     double pts;
1726     int ret;
1727
1728 #if CONFIG_AVFILTER
1729     AVFilterGraph *graph = avfilter_graph_alloc();
1730     AVFilterContext *filt_out = NULL;
1731     int last_w = is->video_st->codec->width;
1732     int last_h = is->video_st->codec->height;
1733
1734     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1735         goto the_end;
1736     filt_out = is->out_video_filter;
1737 #endif
1738
1739     for(;;) {
1740 #if !CONFIG_AVFILTER
1741         AVPacket pkt;
1742 #else
1743         AVFilterBufferRef *picref;
1744         AVRational tb = filt_out->inputs[0]->time_base;
1745 #endif
1746         while (is->paused && !is->videoq.abort_request)
1747             SDL_Delay(10);
1748 #if CONFIG_AVFILTER
1749         if (   last_w != is->video_st->codec->width
1750             || last_h != is->video_st->codec->height) {
1751             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
1752                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
1753             avfilter_graph_free(&graph);
1754             graph = avfilter_graph_alloc();
1755             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1756                 goto the_end;
1757             filt_out = is->out_video_filter;
1758             last_w = is->video_st->codec->width;
1759             last_h = is->video_st->codec->height;
1760         }
1761         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1762         if (picref) {
1763             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1764             pts_int = picref->pts;
1765             pos     = picref->pos;
1766             frame->opaque = picref;
1767         }
1768
1769         if (av_cmp_q(tb, is->video_st->time_base)) {
1770             av_unused int64_t pts1 = pts_int;
1771             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1772             av_dlog(NULL, "video_thread(): "
1773                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1774                     tb.num, tb.den, pts1,
1775                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1776         }
1777 #else
1778         ret = get_video_frame(is, frame, &pts_int, &pkt);
1779         pos = pkt.pos;
1780         av_free_packet(&pkt);
1781 #endif
1782
1783         if (ret < 0) goto the_end;
1784
1785 #if CONFIG_AVFILTER
1786         if (!picref)
1787             continue;
1788 #endif
1789
1790         pts = pts_int*av_q2d(is->video_st->time_base);
1791
1792         ret = queue_picture(is, frame, pts, pos);
1793
1794         if (ret < 0)
1795             goto the_end;
1796
1797         if (step)
1798             if (cur_stream)
1799                 stream_toggle_pause(cur_stream);
1800     }
1801  the_end:
1802 #if CONFIG_AVFILTER
1803     avfilter_graph_free(&graph);
1804 #endif
1805     av_free(frame);
1806     return 0;
1807 }
1808
1809 static int subtitle_thread(void *arg)
1810 {
1811     VideoState *is = arg;
1812     SubPicture *sp;
1813     AVPacket pkt1, *pkt = &pkt1;
1814     int got_subtitle;
1815     double pts;
1816     int i, j;
1817     int r, g, b, y, u, v, a;
1818
1819     for(;;) {
1820         while (is->paused && !is->subtitleq.abort_request) {
1821             SDL_Delay(10);
1822         }
1823         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1824             break;
1825
1826         if(pkt->data == flush_pkt.data){
1827             avcodec_flush_buffers(is->subtitle_st->codec);
1828             continue;
1829         }
1830         SDL_LockMutex(is->subpq_mutex);
1831         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1832                !is->subtitleq.abort_request) {
1833             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1834         }
1835         SDL_UnlockMutex(is->subpq_mutex);
1836
1837         if (is->subtitleq.abort_request)
1838             return 0;
1839
1840         sp = &is->subpq[is->subpq_windex];
1841
1842         /* NOTE: pts is the PTS of the _first_ picture beginning in
1843            this packet, if any */
1844         pts = 0;
1845         if (pkt->pts != AV_NOPTS_VALUE)
1846             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1847
1848         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1849                                  &got_subtitle, pkt);
1850
1851         if (got_subtitle && sp->sub.format == 0) {
1852             sp->pts = pts;
1853
1854             for (i = 0; i < sp->sub.num_rects; i++)
1855             {
1856                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1857                 {
1858                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1859                     y = RGB_TO_Y_CCIR(r, g, b);
1860                     u = RGB_TO_U_CCIR(r, g, b, 0);
1861                     v = RGB_TO_V_CCIR(r, g, b, 0);
1862                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1863                 }
1864             }
1865
1866             /* now we can update the picture count */
1867             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1868                 is->subpq_windex = 0;
1869             SDL_LockMutex(is->subpq_mutex);
1870             is->subpq_size++;
1871             SDL_UnlockMutex(is->subpq_mutex);
1872         }
1873         av_free_packet(pkt);
1874     }
1875     return 0;
1876 }
1877
1878 /* copy samples for viewing in the audio display window */
1879 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1880 {
1881     int size, len;
1882
1883     size = samples_size / sizeof(short);
1884     while (size > 0) {
1885         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1886         if (len > size)
1887             len = size;
1888         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1889         samples += len;
1890         is->sample_array_index += len;
1891         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1892             is->sample_array_index = 0;
1893         size -= len;
1894     }
1895 }
1896
1897 /* return the new audio buffer size (samples can be added or removed
1898    to get better sync if the video or external clock is the master) */
1899 static int synchronize_audio(VideoState *is, short *samples,
1900                              int samples_size1, double pts)
1901 {
1902     int n, samples_size;
1903     double ref_clock;
1904
1905     n = 2 * is->audio_st->codec->channels;
1906     samples_size = samples_size1;
1907
1908     /* if not master, then we try to remove or add samples to correct the clock */
1909     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1910          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1911         double diff, avg_diff;
1912         int wanted_size, min_size, max_size, nb_samples;
1913
1914         ref_clock = get_master_clock(is);
1915         diff = get_audio_clock(is) - ref_clock;
1916
1917         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
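                 /* accumulate the A-V difference in an exponentially decaying sum:
                    older errors are damped by audio_diff_avg_coef at every step, and
                    multiplying the sum by (1 - coef) below yields a weighted average
                    of the most recent differences */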
1918             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1919             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1920                 /* not enough measurements to have a correct estimate */
1921                 is->audio_diff_avg_count++;
1922             } else {
1923                 /* estimate the A-V difference */
1924                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1925
1926                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1927                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1928                     nb_samples = samples_size / n;
1929
1930                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1931                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1932                     if (wanted_size < min_size)
1933                         wanted_size = min_size;
1934                     else if (wanted_size > max_size)
1935                         wanted_size = max_size;
1936
1937                     /* add or remove samples to correct the sync */
1938                     if (wanted_size < samples_size) {
1939                         /* remove samples */
1940                         samples_size = wanted_size;
1941                     } else if (wanted_size > samples_size) {
1942                         uint8_t *samples_end, *q;
1943                         int nb;
1944
1945                         /* add samples */
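                             /* grow the buffer by duplicating the last sample
                                frame (n bytes) until wanted_size bytes are reached */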
1946                         nb = wanted_size - samples_size;
1947                         samples_end = (uint8_t *)samples + samples_size - n;
1948                         q = samples_end + n;
1949                         while (nb > 0) {
1950                             memcpy(q, samples_end, n);
1951                             q += n;
1952                             nb -= n;
1953                         }
1954                         samples_size = wanted_size;
1955                     }
1956                 }
1957 #if 0
1958                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1959                        diff, avg_diff, samples_size - samples_size1,
1960                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1961 #endif
1962             }
1963         } else {
1964             /* too big a difference: probably initial PTS errors, so
1965                reset the A-V filter */
1966             is->audio_diff_avg_count = 0;
1967             is->audio_diff_cum = 0;
1968         }
1969     }
1970
1971     return samples_size;
1972 }
1973
1974 /* decode one audio frame and return its uncompressed size */
1975 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1976 {
1977     AVPacket *pkt_temp = &is->audio_pkt_temp;
1978     AVPacket *pkt = &is->audio_pkt;
1979     AVCodecContext *dec= is->audio_st->codec;
1980     int n, len1, data_size;
1981     double pts;
1982
1983     for(;;) {
1984         /* NOTE: the audio packet can contain several frames */
1985         while (pkt_temp->size > 0) {
1986             data_size = sizeof(is->audio_buf1);
1987             len1 = avcodec_decode_audio3(dec,
1988                                         (int16_t *)is->audio_buf1, &data_size,
1989                                         pkt_temp);
1990             if (len1 < 0) {
1991                 /* if error, we skip the frame */
1992                 pkt_temp->size = 0;
1993                 break;
1994             }
1995
1996             pkt_temp->data += len1;
1997             pkt_temp->size -= len1;
1998             if (data_size <= 0)
1999                 continue;
2000
2001             if (dec->sample_fmt != is->audio_src_fmt) {
2002                 if (is->reformat_ctx)
2003                     av_audio_convert_free(is->reformat_ctx);
2004                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2005                                                          dec->sample_fmt, 1, NULL, 0);
2006                 if (!is->reformat_ctx) {
2007                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2008                         av_get_sample_fmt_name(dec->sample_fmt),
2009                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2010                         break;
2011                 }
2012                 is->audio_src_fmt= dec->sample_fmt;
2013             }
2014
2015             if (is->reformat_ctx) {
2016                 const void *ibuf[6]= {is->audio_buf1};
2017                 void *obuf[6]= {is->audio_buf2};
2018                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2019                 int ostride[6]= {2};
2020                 int len= data_size/istride[0];
2021                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2022                     printf("av_audio_convert() failed\n");
2023                     break;
2024                 }
2025                 is->audio_buf= is->audio_buf2;
2026                 /* FIXME: existing code assumes that data_size equals framesize*channels*2,
2027                           remove this legacy cruft */
2028                 data_size= len*2;
2029             }else{
2030                 is->audio_buf= is->audio_buf1;
2031             }
2032
2033             /* if no pts, then compute it */
2034             pts = is->audio_clock;
2035             *pts_ptr = pts;
2036             n = 2 * dec->channels;
2037             is->audio_clock += (double)data_size /
2038                 (double)(n * dec->sample_rate);
2039 #ifdef DEBUG
2040             {
2041                 static double last_clock;
2042                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2043                        is->audio_clock - last_clock,
2044                        is->audio_clock, pts);
2045                 last_clock = is->audio_clock;
2046             }
2047 #endif
2048             return data_size;
2049         }
2050
2051         /* free the current packet */
2052         if (pkt->data)
2053             av_free_packet(pkt);
2054
2055         if (is->paused || is->audioq.abort_request) {
2056             return -1;
2057         }
2058
2059         /* read next packet */
2060         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2061             return -1;
2062         if(pkt->data == flush_pkt.data){
2063             avcodec_flush_buffers(dec);
2064             continue;
2065         }
2066
2067         pkt_temp->data = pkt->data;
2068         pkt_temp->size = pkt->size;
2069
2070         /* update the audio clock with the pts if available */
2071         if (pkt->pts != AV_NOPTS_VALUE) {
2072             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2073         }
2074     }
2075 }
2076
2077 /* prepare a new audio buffer */
2078 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2079 {
2080     VideoState *is = opaque;
2081     int audio_size, len1;
2082     int bytes_per_sec;
2083     double pts;
2084
2085     audio_callback_time = av_gettime();
2086
2087     while (len > 0) {
2088         if (is->audio_buf_index >= is->audio_buf_size) {
2089            audio_size = audio_decode_frame(is, &pts);
2090            if (audio_size < 0) {
2091                 /* if error, just output silence */
2092                is->audio_buf = is->audio_buf1;
2093                is->audio_buf_size = 1024;
2094                memset(is->audio_buf, 0, is->audio_buf_size);
2095            } else {
2096                if (is->show_mode != SHOW_MODE_VIDEO)
2097                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2098                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2099                                               pts);
2100                is->audio_buf_size = audio_size;
2101            }
2102            is->audio_buf_index = 0;
2103         }
2104         len1 = is->audio_buf_size - is->audio_buf_index;
2105         if (len1 > len)
2106             len1 = len;
2107         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2108         len -= len1;
2109         stream += len1;
2110         is->audio_buf_index += len1;
2111     }
2112     bytes_per_sec = is->audio_st->codec->sample_rate *
2113             2 * is->audio_st->codec->channels;
2114     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2115     /* Let's assume the audio driver that is used by SDL has two periods. */
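         /* the audio clock refers to the end of the decoded data, so the bytes
            still waiting in the two SDL hardware buffers plus the part of our
            own buffer not yet handed to SDL are subtracted, converted to
            seconds via bytes_per_sec */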
2116     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2117     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2118 }
2119
2120 /* open a given stream. Return 0 if OK */
2121 static int stream_component_open(VideoState *is, int stream_index)
2122 {
2123     AVFormatContext *ic = is->ic;
2124     AVCodecContext *avctx;
2125     AVCodec *codec;
2126     SDL_AudioSpec wanted_spec, spec;
2127     AVDictionary *opts;
2128     AVDictionaryEntry *t = NULL;
2129
2130     if (stream_index < 0 || stream_index >= ic->nb_streams)
2131         return -1;
2132     avctx = ic->streams[stream_index]->codec;
2133
2134     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2135
2136     /* limit the number of requested audio channels to at most 2 */
2137     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2138         if (avctx->channels > 0) {
2139             avctx->request_channels = FFMIN(2, avctx->channels);
2140         } else {
2141             avctx->request_channels = 2;
2142         }
2143     }
2144
2145     codec = avcodec_find_decoder(avctx->codec_id);
2146     if (!codec)
2147         return -1;
2148
2149     avctx->workaround_bugs = workaround_bugs;
2150     avctx->lowres = lowres;
2151     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2152     avctx->idct_algo= idct;
2153     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2154     avctx->skip_frame= skip_frame;
2155     avctx->skip_idct= skip_idct;
2156     avctx->skip_loop_filter= skip_loop_filter;
2157     avctx->error_recognition= error_recognition;
2158     avctx->error_concealment= error_concealment;
2159     avctx->thread_count= thread_count;
2160
2161     if(codec->capabilities & CODEC_CAP_DR1)
2162         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2163
2164     if (avcodec_open2(avctx, codec, &opts) < 0)
2166         return -1;
2167     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2168         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2169         return AVERROR_OPTION_NOT_FOUND;
2170     }
2171
2172     /* prepare audio output */
2173     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2174         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2175             fprintf(stderr, "Invalid sample rate or channel count\n");
2176             return -1;
2177         }
2178         wanted_spec.freq = avctx->sample_rate;
2179         wanted_spec.format = AUDIO_S16SYS;
2180         wanted_spec.channels = avctx->channels;
2181         wanted_spec.silence = 0;
2182         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2183         wanted_spec.callback = sdl_audio_callback;
2184         wanted_spec.userdata = is;
2185         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2186             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2187             return -1;
2188         }
2189         is->audio_hw_buf_size = spec.size;
2190         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2191     }
2192
2193     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2194     switch(avctx->codec_type) {
2195     case AVMEDIA_TYPE_AUDIO:
2196         is->audio_stream = stream_index;
2197         is->audio_st = ic->streams[stream_index];
2198         is->audio_buf_size = 0;
2199         is->audio_buf_index = 0;
2200
2201         /* init averaging filter */
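             /* the coefficient equals 0.01^(1/AUDIO_DIFF_AVG_NB), so after
                AUDIO_DIFF_AVG_NB updates the weight of an old measurement has
                decayed to about 1% */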
2202         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2203         is->audio_diff_avg_count = 0;
2204         /* since we do not have precise enough information about the audio FIFO fullness,
2205            we correct audio sync only if the error is larger than this threshold */
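             /* i.e. the duration of two SDL audio buffers, e.g. roughly 46 ms
                for 1024 samples at 44.1 kHz */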
2206         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2207
2208         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2209         packet_queue_init(&is->audioq);
2210         SDL_PauseAudio(0);
2211         break;
2212     case AVMEDIA_TYPE_VIDEO:
2213         is->video_stream = stream_index;
2214         is->video_st = ic->streams[stream_index];
2215
2216         packet_queue_init(&is->videoq);
2217         is->video_tid = SDL_CreateThread(video_thread, is);
2218         break;
2219     case AVMEDIA_TYPE_SUBTITLE:
2220         is->subtitle_stream = stream_index;
2221         is->subtitle_st = ic->streams[stream_index];
2222         packet_queue_init(&is->subtitleq);
2223
2224         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2225         break;
2226     default:
2227         break;
2228     }
2229     return 0;
2230 }
2231
2232 static void stream_component_close(VideoState *is, int stream_index)
2233 {
2234     AVFormatContext *ic = is->ic;
2235     AVCodecContext *avctx;
2236
2237     if (stream_index < 0 || stream_index >= ic->nb_streams)
2238         return;
2239     avctx = ic->streams[stream_index]->codec;
2240
2241     switch(avctx->codec_type) {
2242     case AVMEDIA_TYPE_AUDIO:
2243         packet_queue_abort(&is->audioq);
2244
2245         SDL_CloseAudio();
2246
2247         packet_queue_end(&is->audioq);
2248         if (is->reformat_ctx)
2249             av_audio_convert_free(is->reformat_ctx);
2250         is->reformat_ctx = NULL;
2251         break;
2252     case AVMEDIA_TYPE_VIDEO:
2253         packet_queue_abort(&is->videoq);
2254
2255         /* note: we also signal this mutex to make sure we unblock the
2256            video thread in all cases */
2257         SDL_LockMutex(is->pictq_mutex);
2258         SDL_CondSignal(is->pictq_cond);
2259         SDL_UnlockMutex(is->pictq_mutex);
2260
2261         SDL_WaitThread(is->video_tid, NULL);
2262
2263         packet_queue_end(&is->videoq);
2264         break;
2265     case AVMEDIA_TYPE_SUBTITLE:
2266         packet_queue_abort(&is->subtitleq);
2267
2268         /* note: we also signal this mutex to make sure we unblock the
2269            subtitle thread in all cases */
2270         SDL_LockMutex(is->subpq_mutex);
2271         is->subtitle_stream_changed = 1;
2272
2273         SDL_CondSignal(is->subpq_cond);
2274         SDL_UnlockMutex(is->subpq_mutex);
2275
2276         SDL_WaitThread(is->subtitle_tid, NULL);
2277
2278         packet_queue_end(&is->subtitleq);
2279         break;
2280     default:
2281         break;
2282     }
2283
2284     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2285     avcodec_close(avctx);
2286     switch(avctx->codec_type) {
2287     case AVMEDIA_TYPE_AUDIO:
2288         is->audio_st = NULL;
2289         is->audio_stream = -1;
2290         break;
2291     case AVMEDIA_TYPE_VIDEO:
2292         is->video_st = NULL;
2293         is->video_stream = -1;
2294         break;
2295     case AVMEDIA_TYPE_SUBTITLE:
2296         is->subtitle_st = NULL;
2297         is->subtitle_stream = -1;
2298         break;
2299     default:
2300         break;
2301     }
2302 }
2303
2304 /* since we have only one decoding thread, we can use a global
2305    variable instead of a thread local variable */
2306 static VideoState *global_video_state;
2307
2308 static int decode_interrupt_cb(void)
2309 {
2310     return (global_video_state && global_video_state->abort_request);
2311 }
2312
2313 /* this thread gets the stream from the disk or the network */
2314 static int read_thread(void *arg)
2315 {
2316     VideoState *is = arg;
2317     AVFormatContext *ic = NULL;
2318     int err, i, ret;
2319     int st_index[AVMEDIA_TYPE_NB];
2320     AVPacket pkt1, *pkt = &pkt1;
2321     int eof=0;
2322     int pkt_in_play_range = 0;
2323     AVDictionaryEntry *t;
2324     AVDictionary **opts;
2325     int orig_nb_streams;
2326
2327     memset(st_index, -1, sizeof(st_index));
2328     is->video_stream = -1;
2329     is->audio_stream = -1;
2330     is->subtitle_stream = -1;
2331
2332     global_video_state = is;
2333     avio_set_interrupt_cb(decode_interrupt_cb);
2334
2335     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2336     if (err < 0) {
2337         print_error(is->filename, err);
2338         ret = -1;
2339         goto fail;
2340     }
2341     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2342         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2343         ret = AVERROR_OPTION_NOT_FOUND;
2344         goto fail;
2345     }
2346     is->ic = ic;
2347
2348     if(genpts)
2349         ic->flags |= AVFMT_FLAG_GENPTS;
2350
2351     opts = setup_find_stream_info_opts(ic, codec_opts);
2352     orig_nb_streams = ic->nb_streams;
2353
2354     err = avformat_find_stream_info(ic, opts);
2355     if (err < 0) {
2356         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2357         ret = -1;
2358         goto fail;
2359     }
2360     for (i = 0; i < orig_nb_streams; i++)
2361         av_dict_free(&opts[i]);
2362     av_freep(&opts);
2363
2364     if(ic->pb)
2365         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2366
2367     if(seek_by_bytes<0)
2368         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2369
2370     /* if seeking requested, we execute it */
2371     if (start_time != AV_NOPTS_VALUE) {
2372         int64_t timestamp;
2373
2374         timestamp = start_time;
2375         /* add the stream start time */
2376         if (ic->start_time != AV_NOPTS_VALUE)
2377             timestamp += ic->start_time;
2378         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2379         if (ret < 0) {
2380             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2381                     is->filename, (double)timestamp / AV_TIME_BASE);
2382         }
2383     }
2384
2385     for (i = 0; i < ic->nb_streams; i++)
2386         ic->streams[i]->discard = AVDISCARD_ALL;
2387     if (!video_disable)
2388         st_index[AVMEDIA_TYPE_VIDEO] =
2389             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2390                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2391     if (!audio_disable)
2392         st_index[AVMEDIA_TYPE_AUDIO] =
2393             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2394                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2395                                 st_index[AVMEDIA_TYPE_VIDEO],
2396                                 NULL, 0);
2397     if (!video_disable)
2398         st_index[AVMEDIA_TYPE_SUBTITLE] =
2399             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2400                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2401                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2402                                  st_index[AVMEDIA_TYPE_AUDIO] :
2403                                  st_index[AVMEDIA_TYPE_VIDEO]),
2404                                 NULL, 0);
2405     if (show_status) {
2406         av_dump_format(ic, 0, is->filename, 0);
2407     }
2408
2409     is->show_mode = show_mode;
2410
2411     /* open the streams */
2412     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2413         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2414     }
2415
2416     ret=-1;
2417     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2418         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2419     }
2420     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2421     if (is->show_mode == SHOW_MODE_NONE)
2422         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2423
2424     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2425         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2426     }
2427
2428     if (is->video_stream < 0 && is->audio_stream < 0) {
2429         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2430         ret = -1;
2431         goto fail;
2432     }
2433
2434     for(;;) {
2435         if (is->abort_request)
2436             break;
2437         if (is->paused != is->last_paused) {
2438             is->last_paused = is->paused;
2439             if (is->paused)
2440                 is->read_pause_return= av_read_pause(ic);
2441             else
2442                 av_read_play(ic);
2443         }
2444 #if CONFIG_RTSP_DEMUXER
2445         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2446             /* wait 10 ms to avoid trying to get another packet */
2447             /* XXX: horrible */
2448             SDL_Delay(10);
2449             continue;
2450         }
2451 #endif
2452         if (is->seek_req) {
2453             int64_t seek_target= is->seek_pos;
2454             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2455             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2456 //FIXME the +-2 is due to rounding not being done in the correct direction in the generation
2457 //      of the seek_pos/seek_rel variables
2458
2459             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2460             if (ret < 0) {
2461                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2462             }else{
2463                 if (is->audio_stream >= 0) {
2464                     packet_queue_flush(&is->audioq);
2465                     packet_queue_put(&is->audioq, &flush_pkt);
2466                 }
2467                 if (is->subtitle_stream >= 0) {
2468                     packet_queue_flush(&is->subtitleq);
2469                     packet_queue_put(&is->subtitleq, &flush_pkt);
2470                 }
2471                 if (is->video_stream >= 0) {
2472                     packet_queue_flush(&is->videoq);
2473                     packet_queue_put(&is->videoq, &flush_pkt);
2474                 }
2475             }
2476             is->seek_req = 0;
2477             eof= 0;
2478         }
2479
2480         /* if the queues are full, no need to read more */
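             /* stop reading when either the combined queues exceed MAX_QUEUE_SIZE
                bytes or every stream that is open already has enough data buffered */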
2481         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2482             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2483                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2484                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2485             /* wait 10 ms */
2486             SDL_Delay(10);
2487             continue;
2488         }
2489         if(eof) {
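                 /* end of file reached: queue an empty packet so the video decoder
                    flushes its delayed frames, then loop or exit once all queues
                    have drained */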
2490             if(is->video_stream >= 0){
2491                 av_init_packet(pkt);
2492                 pkt->data=NULL;
2493                 pkt->size=0;
2494                 pkt->stream_index= is->video_stream;
2495                 packet_queue_put(&is->videoq, pkt);
2496             }
2497             SDL_Delay(10);
2498             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2499                 if(loop!=1 && (!loop || --loop)){
2500                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2501                 }else if(autoexit){
2502                     ret=AVERROR_EOF;
2503                     goto fail;
2504                 }
2505             }
2506             eof=0;
2507             continue;
2508         }
2509         ret = av_read_frame(ic, pkt);
2510         if (ret < 0) {
2511             if (ret == AVERROR_EOF || url_feof(ic->pb))
2512                 eof=1;
2513             if (ic->pb && ic->pb->error)
2514                 break;
2515             SDL_Delay(100); /* wait for user event */
2516             continue;
2517         }
2518         /* check if packet is in play range specified by user, then queue, otherwise discard */
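             /* the packet pts is converted to seconds relative to the stream start,
                the user supplied start_time (in microseconds) is subtracted, and the
                result is compared against the requested duration */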
2519         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2520                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2521                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2522                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2523                 <= ((double)duration/1000000);
2524         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2525             packet_queue_put(&is->audioq, pkt);
2526         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2527             packet_queue_put(&is->videoq, pkt);
2528         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2529             packet_queue_put(&is->subtitleq, pkt);
2530         } else {
2531             av_free_packet(pkt);
2532         }
2533     }
2534     /* wait until the end */
2535     while (!is->abort_request) {
2536         SDL_Delay(100);
2537     }
2538
2539     ret = 0;
2540  fail:
2541     /* disable interrupting */
2542     global_video_state = NULL;
2543
2544     /* close each stream */
2545     if (is->audio_stream >= 0)
2546         stream_component_close(is, is->audio_stream);
2547     if (is->video_stream >= 0)
2548         stream_component_close(is, is->video_stream);
2549     if (is->subtitle_stream >= 0)
2550         stream_component_close(is, is->subtitle_stream);
2551     if (is->ic) {
2552         av_close_input_file(is->ic);
2553         is->ic = NULL; /* safety */
2554     }
2555     avio_set_interrupt_cb(NULL);
2556
2557     if (ret != 0) {
2558         SDL_Event event;
2559
2560         event.type = FF_QUIT_EVENT;
2561         event.user.data1 = is;
2562         SDL_PushEvent(&event);
2563     }
2564     return 0;
2565 }
2566
2567 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2568 {
2569     VideoState *is;
2570
2571     is = av_mallocz(sizeof(VideoState));
2572     if (!is)
2573         return NULL;
2574     av_strlcpy(is->filename, filename, sizeof(is->filename));
2575     is->iformat = iformat;
2576     is->ytop = 0;
2577     is->xleft = 0;
2578
2579     /* start video display */
2580     is->pictq_mutex = SDL_CreateMutex();
2581     is->pictq_cond = SDL_CreateCond();
2582
2583     is->subpq_mutex = SDL_CreateMutex();
2584     is->subpq_cond = SDL_CreateCond();
2585
2586     is->av_sync_type = av_sync_type;
2587     is->read_tid = SDL_CreateThread(read_thread, is);
2588     if (!is->read_tid) {
2589         av_free(is);
2590         return NULL;
2591     }
2592     return is;
2593 }
2594
2595 static void stream_cycle_channel(VideoState *is, int codec_type)
2596 {
2597     AVFormatContext *ic = is->ic;
2598     int start_index, stream_index;
2599     AVStream *st;
2600
2601     if (codec_type == AVMEDIA_TYPE_VIDEO)
2602         start_index = is->video_stream;
2603     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2604         start_index = is->audio_stream;
2605     else
2606         start_index = is->subtitle_stream;
2607     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2608         return;
2609     stream_index = start_index;
2610     for(;;) {
2611         if (++stream_index >= is->ic->nb_streams)
2612         {
2613             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2614             {
2615                 stream_index = -1;
2616                 goto the_end;
2617             } else
2618                 stream_index = 0;
2619         }
2620         if (stream_index == start_index)
2621             return;
2622         st = ic->streams[stream_index];
2623         if (st->codec->codec_type == codec_type) {
2624             /* check that parameters are OK */
2625             switch(codec_type) {
2626             case AVMEDIA_TYPE_AUDIO:
2627                 if (st->codec->sample_rate != 0 &&
2628                     st->codec->channels != 0)
2629                     goto the_end;
2630                 break;
2631             case AVMEDIA_TYPE_VIDEO:
2632             case AVMEDIA_TYPE_SUBTITLE:
2633                 goto the_end;
2634             default:
2635                 break;
2636             }
2637         }
2638     }
2639  the_end:
2640     stream_component_close(is, start_index);
2641     stream_component_open(is, stream_index);
2642 }
2643
2644
2645 static void toggle_full_screen(void)
2646 {
2647     is_full_screen = !is_full_screen;
2648     video_open(cur_stream);
2649 }
2650
2651 static void toggle_pause(void)
2652 {
2653     if (cur_stream)
2654         stream_toggle_pause(cur_stream);
2655     step = 0;
2656 }
2657
2658 static void step_to_next_frame(void)
2659 {
2660     if (cur_stream) {
2661         /* if the stream is paused, unpause it, then step */
2662         if (cur_stream->paused)
2663             stream_toggle_pause(cur_stream);
2664     }
2665     step = 1;
2666 }
2667
2668 static void toggle_audio_display(void)
2669 {
2670     if (cur_stream) {
2671         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2672         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2673         fill_rectangle(screen,
2674                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2675                     bgcolor);
2676         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2677     }
2678 }
2679
2680 /* handle an event sent by the GUI */
2681 static void event_loop(void)
2682 {
2683     SDL_Event event;
2684     double incr, pos, frac;
2685
2686     for(;;) {
2687         double x;
2688         SDL_WaitEvent(&event);
2689         switch(event.type) {
2690         case SDL_KEYDOWN:
2691             if (exit_on_keydown) {
2692                 do_exit();
2693                 break;
2694             }
2695             switch(event.key.keysym.sym) {
2696             case SDLK_ESCAPE:
2697             case SDLK_q:
2698                 do_exit();
2699                 break;
2700             case SDLK_f:
2701                 toggle_full_screen();
2702                 break;
2703             case SDLK_p:
2704             case SDLK_SPACE:
2705                 toggle_pause();
2706                 break;
2707             case SDLK_s: //S: Step to next frame
2708                 step_to_next_frame();
2709                 break;
2710             case SDLK_a:
2711                 if (cur_stream)
2712                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2713                 break;
2714             case SDLK_v:
2715                 if (cur_stream)
2716                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2717                 break;
2718             case SDLK_t:
2719                 if (cur_stream)
2720                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2721                 break;
2722             case SDLK_w:
2723                 toggle_audio_display();
2724                 break;
2725             case SDLK_LEFT:
2726                 incr = -10.0;
2727                 goto do_seek;
2728             case SDLK_RIGHT:
2729                 incr = 10.0;
2730                 goto do_seek;
2731             case SDLK_UP:
2732                 incr = 60.0;
2733                 goto do_seek;
2734             case SDLK_DOWN:
2735                 incr = -60.0;
2736             do_seek:
2737                 if (cur_stream) {
2738                     if (seek_by_bytes) {
2739                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2740                             pos= cur_stream->video_current_pos;
2741                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2742                             pos= cur_stream->audio_pkt.pos;
2743                         }else
2744                             pos = avio_tell(cur_stream->ic->pb);
2745                         if (cur_stream->ic->bit_rate)
2746                             incr *= cur_stream->ic->bit_rate / 8.0;
2747                         else
2748                             incr *= 180000.0;
2749                         pos += incr;
2750                         stream_seek(cur_stream, pos, incr, 1);
2751                     } else {
2752                         pos = get_master_clock(cur_stream);
2753                         pos += incr;
2754                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2755                     }
2756                 }
2757                 break;
2758             default:
2759                 break;
2760             }
2761             break;
2762         case SDL_MOUSEBUTTONDOWN:
2763             if (exit_on_mousedown) {
2764                 do_exit();
2765                 break;
2766             }
2767         case SDL_MOUSEMOTION:
2768             if(event.type ==SDL_MOUSEBUTTONDOWN){
2769                 x= event.button.x;
2770             }else{
2771                 if(event.motion.state != SDL_PRESSED)
2772                     break;
2773                 x= event.motion.x;
2774             }
2775             if (cur_stream) {
2776                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2777                     uint64_t size=  avio_size(cur_stream->ic->pb);
2778                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2779                 }else{
2780                     int64_t ts;
2781                     int ns, hh, mm, ss;
2782                     int tns, thh, tmm, tss;
2783                     tns = cur_stream->ic->duration/1000000LL;
2784                     thh = tns/3600;
2785                     tmm = (tns%3600)/60;
2786                     tss = (tns%60);
2787                     frac = x/cur_stream->width;
2788                     ns = frac*tns;
2789                     hh = ns/3600;
2790                     mm = (ns%3600)/60;
2791                     ss = (ns%60);
2792                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2793                             hh, mm, ss, thh, tmm, tss);
2794                     ts = frac*cur_stream->ic->duration;
2795                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2796                         ts += cur_stream->ic->start_time;
2797                     stream_seek(cur_stream, ts, 0, 0);
2798                 }
2799             }
2800             break;
2801         case SDL_VIDEORESIZE:
2802             if (cur_stream) {
2803                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2804                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2805                 screen_width = cur_stream->width = event.resize.w;
2806                 screen_height= cur_stream->height= event.resize.h;
2807             }
2808             break;
2809         case SDL_QUIT:
2810         case FF_QUIT_EVENT:
2811             do_exit();
2812             break;
2813         case FF_ALLOC_EVENT:
2814             video_open(event.user.data1);
2815             alloc_picture(event.user.data1);
2816             break;
2817         case FF_REFRESH_EVENT:
2818             video_refresh(event.user.data1);
2819             cur_stream->refresh=0;
2820             break;
2821         default:
2822             break;
2823         }
2824     }
2825 }
2826
2827 static int opt_frame_size(const char *opt, const char *arg)
2828 {
2829     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2830     return opt_default("video_size", arg);
2831 }
2832
2833 static int opt_width(const char *opt, const char *arg)
2834 {
2835     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2836     return 0;
2837 }
2838
2839 static int opt_height(const char *opt, const char *arg)
2840 {
2841     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2842     return 0;
2843 }
2844
2845 static int opt_format(const char *opt, const char *arg)
2846 {
2847     file_iformat = av_find_input_format(arg);
2848     if (!file_iformat) {
2849         fprintf(stderr, "Unknown input format: %s\n", arg);
2850         return AVERROR(EINVAL);
2851     }
2852     return 0;
2853 }
2854
2855 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2856 {
2857     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2858     return opt_default("pixel_format", arg);
2859 }
2860
2861 static int opt_sync(const char *opt, const char *arg)
2862 {
2863     if (!strcmp(arg, "audio"))
2864         av_sync_type = AV_SYNC_AUDIO_MASTER;
2865     else if (!strcmp(arg, "video"))
2866         av_sync_type = AV_SYNC_VIDEO_MASTER;
2867     else if (!strcmp(arg, "ext"))
2868         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2869     else {
2870         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2871         exit(1);
2872     }
2873     return 0;
2874 }
2875
2876 static int opt_seek(const char *opt, const char *arg)
2877 {
2878     start_time = parse_time_or_die(opt, arg, 1);
2879     return 0;
2880 }
2881
2882 static int opt_duration(const char *opt, const char *arg)
2883 {
2884     duration = parse_time_or_die(opt, arg, 1);
2885     return 0;
2886 }
2887
2888 static int opt_thread_count(const char *opt, const char *arg)
2889 {
2890     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2891 #if !HAVE_THREADS
2892     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2893 #endif
2894     return 0;
2895 }
2896
2897 static int opt_show_mode(const char *opt, const char *arg)
2898 {
2899     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2900                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2901                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2902                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2903     return 0;
2904 }
2905
2906 static int opt_input_file(const char *opt, const char *filename)
2907 {
2908     if (input_filename) {
2909         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2910                 filename, input_filename);
2911         exit(1);
2912     }
2913     if (!strcmp(filename, "-"))
2914         filename = "pipe:";
2915     input_filename = filename;
2916     return 0;
2917 }
2918
2919 static const OptionDef options[] = {
2920 #include "cmdutils_common_opts.h"
2921     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2922     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2923     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2924     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2925     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2926     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2927     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2928     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2929     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2930     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2931     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2932     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2933     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2934     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2935     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2936     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2937     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2938     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2939     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2940     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2941     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2942     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2943     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2944     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2945     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2946     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2947     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2948     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2949     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2950     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2951     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2952     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2953     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2954     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2955     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2956 #if CONFIG_AVFILTER
2957     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2958 #endif
2959     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2960     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2961     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2962     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2963     { NULL, },
2964 };
2965
2966 static void show_usage(void)
2967 {
2968     printf("Simple media player\n");
2969     printf("usage: %s [options] input_file\n", program_name);
2970     printf("\n");
2971 }
2972
2973 static int opt_help(const char *opt, const char *arg)
2974 {
2975     av_log_set_callback(log_callback_help);
2976     show_usage();
2977     show_help_options(options, "Main options:\n",
2978                       OPT_EXPERT, 0);
2979     show_help_options(options, "\nAdvanced options:\n",
2980                       OPT_EXPERT, OPT_EXPERT);
2981     printf("\n");
2982     av_opt_show2(avcodec_opts[0], NULL,
2983                  AV_OPT_FLAG_DECODING_PARAM, 0);
2984     printf("\n");
2985     av_opt_show2(avformat_opts, NULL,
2986                  AV_OPT_FLAG_DECODING_PARAM, 0);
2987 #if !CONFIG_AVFILTER
2988     printf("\n");
2989     av_opt_show2(sws_opts, NULL,
2990                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2991 #endif
2992     printf("\nWhile playing:\n"
2993            "q, ESC              quit\n"
2994            "f                   toggle full screen\n"
2995            "p, SPC              pause\n"
2996            "a                   cycle audio channel\n"
2997            "v                   cycle video channel\n"
2998            "t                   cycle subtitle channel\n"
2999            "w                   show audio waves\n"
3000            "s                   activate frame-step mode\n"
3001            "left/right          seek backward/forward 10 seconds\n"
3002            "down/up             seek backward/forward 1 minute\n"
3003            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3004            );
3005     return 0;
3006 }
3007
3008 /* program entry point */
3009 int main(int argc, char **argv)
3010 {
3011     int flags;
3012
3013     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3014
3015     /* register all codecs, demuxers and protocols */
3016     avcodec_register_all();
3017 #if CONFIG_AVDEVICE
3018     avdevice_register_all();
3019 #endif
3020 #if CONFIG_AVFILTER
3021     avfilter_register_all();
3022 #endif
3023     av_register_all();
3024
3025     init_opts();
3026
3027     show_banner();
3028
3029     parse_options(argc, argv, options, opt_input_file);
3030
3031     if (!input_filename) {
3032         show_usage();
3033         fprintf(stderr, "An input file must be specified\n");
3034         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3035         exit(1);
3036     }
3037
3038     if (display_disable) {
3039         video_disable = 1;
3040     }
3041     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3042     if (audio_disable)
3043         flags &= ~SDL_INIT_AUDIO;
3044 #if !defined(__MINGW32__) && !defined(__APPLE__)
3045     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3046 #endif
3047     if (SDL_Init (flags)) {
3048         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3049         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3050         exit(1);
3051     }
3052
3053     if (!display_disable) {
3054 #if HAVE_SDL_VIDEO_SIZE
3055         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3056         fs_screen_width = vi->current_w;
3057         fs_screen_height = vi->current_h;
3058 #endif
3059     }
3060
3061     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3062     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3063     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3064
3065     av_init_packet(&flush_pkt);
3066     flush_pkt.data= (uint8_t *)"FLUSH";
3067
3068     cur_stream = stream_open(input_filename, file_iformat);
3069
3070     event_loop();
3071
3072     /* never reached, event_loop() does not return */
3073
3074     return 0;
3075 }