1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     int64_t pos;                                 ///<byte position in file
102     SDL_Overlay *bmp;
103     int width, height; /* source height & width */
104     int allocated;
105     enum PixelFormat pix_fmt;
106
107 #if CONFIG_AVFILTER
108     AVFilterBufferRef *picref;
109 #endif
110 } VideoPicture;
111
112 typedef struct SubPicture {
113     double pts; /* presentation time stamp for this picture */
114     AVSubtitle sub;
115 } SubPicture;
116
117 enum {
118     AV_SYNC_AUDIO_MASTER, /* default choice */
119     AV_SYNC_VIDEO_MASTER,
120     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
121 };
122
123 typedef struct VideoState {
124     SDL_Thread *read_tid;
125     SDL_Thread *video_tid;
126     SDL_Thread *refresh_tid;
127     AVInputFormat *iformat;
128     int no_background;
129     int abort_request;
130     int paused;
131     int last_paused;
132     int seek_req;
133     int seek_flags;
134     int64_t seek_pos;
135     int64_t seek_rel;
136     int read_pause_return;
137     AVFormatContext *ic;
138
139     int audio_stream;
140
141     int av_sync_type;
142     double external_clock; /* external clock base */
143     int64_t external_clock_time;
144
145     double audio_clock;
146     double audio_diff_cum; /* used for AV difference average computation */
147     double audio_diff_avg_coef;
148     double audio_diff_threshold;
149     int audio_diff_avg_count;
150     AVStream *audio_st;
151     PacketQueue audioq;
152     int audio_hw_buf_size;
153     /* samples output by the codec. We reserve more space for A/V sync
154        compensation */
155     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     uint8_t *audio_buf;
158     unsigned int audio_buf_size; /* in bytes */
159     int audio_buf_index; /* in bytes */
160     AVPacket audio_pkt_temp;
161     AVPacket audio_pkt;
162     enum AVSampleFormat audio_src_fmt;
163     AVAudioConvert *reformat_ctx;
164
165     enum ShowMode {
166         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
167     } show_mode;
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     FFTSample *rdft_data;
174     int xpos;
175
176     SDL_Thread *subtitle_tid;
177     int subtitle_stream;
178     int subtitle_stream_changed;
179     AVStream *subtitle_st;
180     PacketQueue subtitleq;
181     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
182     int subpq_size, subpq_rindex, subpq_windex;
183     SDL_mutex *subpq_mutex;
184     SDL_cond *subpq_cond;
185
186     double frame_timer;
187     double frame_last_pts;
188     double frame_last_delay;
189     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
190     int video_stream;
191     AVStream *video_st;
192     PacketQueue videoq;
193     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
194     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
195     int64_t video_current_pos;                   ///<current displayed file pos
196     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
197     int pictq_size, pictq_rindex, pictq_windex;
198     SDL_mutex *pictq_mutex;
199     SDL_cond *pictq_cond;
200 #if !CONFIG_AVFILTER
201     struct SwsContext *img_convert_ctx;
202 #endif
203
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207 #if CONFIG_AVFILTER
208     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
209 #endif
210
211     float skip_frames;
212     float skip_frames_index;
213     int refresh;
214 } VideoState;
215
216 static int opt_help(const char *opt, const char *arg);
217
218 /* options specified by the user */
219 static AVInputFormat *file_iformat;
220 static const char *input_filename;
221 static const char *window_title;
222 static int fs_screen_width;
223 static int fs_screen_height;
224 static int screen_width = 0;
225 static int screen_height = 0;
226 static int audio_disable;
227 static int video_disable;
228 static int wanted_stream[AVMEDIA_TYPE_NB]={
229     [AVMEDIA_TYPE_AUDIO]=-1,
230     [AVMEDIA_TYPE_VIDEO]=-1,
231     [AVMEDIA_TYPE_SUBTITLE]=-1,
232 };
233 static int seek_by_bytes=-1;
234 static int display_disable;
235 static int show_status = 1;
236 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
237 static int64_t start_time = AV_NOPTS_VALUE;
238 static int64_t duration = AV_NOPTS_VALUE;
239 static int step = 0;
240 static int thread_count = 1;
241 static int workaround_bugs = 1;
242 static int fast = 0;
243 static int genpts = 0;
244 static int lowres = 0;
245 static int idct = FF_IDCT_AUTO;
246 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
247 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
248 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
249 static int error_recognition = FF_ER_CAREFUL;
250 static int error_concealment = 3;
251 static int decoder_reorder_pts= -1;
252 static int autoexit;
253 static int exit_on_keydown;
254 static int exit_on_mousedown;
255 static int loop=1;
256 static int framedrop=-1;
257 static enum ShowMode show_mode = SHOW_MODE_NONE;
258
259 static int rdftspeed=20;
260 #if CONFIG_AVFILTER
261 static char *vfilters = NULL;
262 #endif
263
264 /* current context */
265 static int is_full_screen;
266 static VideoState *cur_stream;
267 static int64_t audio_callback_time;
268
269 static AVPacket flush_pkt;
270
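/* custom SDL user events: picture (re)allocation and display refresh are
   requested from the decoding/refresh threads and handled in the main
   (event) thread */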
271 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
272 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
273 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
274
275 static SDL_Surface *screen;
276
277 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
278 {
279     AVPacketList *pkt1;
280
281     /* duplicate the packet */
282     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
283         return -1;
284
285     pkt1 = av_malloc(sizeof(AVPacketList));
286     if (!pkt1)
287         return -1;
288     pkt1->pkt = *pkt;
289     pkt1->next = NULL;
290
291
292     SDL_LockMutex(q->mutex);
293
294     if (!q->last_pkt)
295
296         q->first_pkt = pkt1;
297     else
298         q->last_pkt->next = pkt1;
299     q->last_pkt = pkt1;
300     q->nb_packets++;
301     q->size += pkt1->pkt.size + sizeof(*pkt1);
302     /* XXX: should duplicate packet data in DV case */
303     SDL_CondSignal(q->cond);
304
305     SDL_UnlockMutex(q->mutex);
306     return 0;
307 }
308
309 /* packet queue handling */
310 static void packet_queue_init(PacketQueue *q)
311 {
312     memset(q, 0, sizeof(PacketQueue));
313     q->mutex = SDL_CreateMutex();
314     q->cond = SDL_CreateCond();
315     packet_queue_put(q, &flush_pkt);
316 }
317
318 static void packet_queue_flush(PacketQueue *q)
319 {
320     AVPacketList *pkt, *pkt1;
321
322     SDL_LockMutex(q->mutex);
323     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
324         pkt1 = pkt->next;
325         av_free_packet(&pkt->pkt);
326         av_freep(&pkt);
327     }
328     q->last_pkt = NULL;
329     q->first_pkt = NULL;
330     q->nb_packets = 0;
331     q->size = 0;
332     SDL_UnlockMutex(q->mutex);
333 }
334
335 static void packet_queue_end(PacketQueue *q)
336 {
337     packet_queue_flush(q);
338     SDL_DestroyMutex(q->mutex);
339     SDL_DestroyCond(q->cond);
340 }
341
342 static void packet_queue_abort(PacketQueue *q)
343 {
344     SDL_LockMutex(q->mutex);
345
346     q->abort_request = 1;
347
348     SDL_CondSignal(q->cond);
349
350     SDL_UnlockMutex(q->mutex);
351 }
352
353 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
354 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
355 {
356     AVPacketList *pkt1;
357     int ret;
358
359     SDL_LockMutex(q->mutex);
360
361     for(;;) {
362         if (q->abort_request) {
363             ret = -1;
364             break;
365         }
366
367         pkt1 = q->first_pkt;
368         if (pkt1) {
369             q->first_pkt = pkt1->next;
370             if (!q->first_pkt)
371                 q->last_pkt = NULL;
372             q->nb_packets--;
373             q->size -= pkt1->pkt.size + sizeof(*pkt1);
374             *pkt = pkt1->pkt;
375             av_free(pkt1);
376             ret = 1;
377             break;
378         } else if (!block) {
379             ret = 0;
380             break;
381         } else {
382             SDL_CondWait(q->cond, q->mutex);
383         }
384     }
385     SDL_UnlockMutex(q->mutex);
386     return ret;
387 }
388
389 static inline void fill_rectangle(SDL_Surface *screen,
390                                   int x, int y, int w, int h, int color)
391 {
392     SDL_Rect rect;
393     rect.x = x;
394     rect.y = y;
395     rect.w = w;
396     rect.h = h;
397     SDL_FillRect(screen, &rect, color);
398 }
399
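/* Blend newp over oldp with alpha a.  The shift s compensates for newp being
   the sum of 2^s chroma samples from the subsampled planes. */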
400 #define ALPHA_BLEND(a, oldp, newp, s)\
401 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
402
403 #define RGBA_IN(r, g, b, a, s)\
404 {\
405     unsigned int v = ((const uint32_t *)(s))[0];\
406     a = (v >> 24) & 0xff;\
407     r = (v >> 16) & 0xff;\
408     g = (v >> 8) & 0xff;\
409     b = v & 0xff;\
410 }
411
412 #define YUVA_IN(y, u, v, a, s, pal)\
413 {\
414     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
415     a = (val >> 24) & 0xff;\
416     y = (val >> 16) & 0xff;\
417     u = (val >> 8) & 0xff;\
418     v = val & 0xff;\
419 }
420
421 #define YUVA_OUT(d, y, u, v, a)\
422 {\
423     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
424 }
425
426
427 #define BPP 1
428
429 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
430 {
431     int wrap, wrap3, width2, skip2;
432     int y, u, v, a, u1, v1, a1, w, h;
433     uint8_t *lum, *cb, *cr;
434     const uint8_t *p;
435     const uint32_t *pal;
436     int dstx, dsty, dstw, dsth;
437
438     dstw = av_clip(rect->w, 0, imgw);
439     dsth = av_clip(rect->h, 0, imgh);
440     dstx = av_clip(rect->x, 0, imgw - dstw);
441     dsty = av_clip(rect->y, 0, imgh - dsth);
442     lum = dst->data[0] + dsty * dst->linesize[0];
443     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
444     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
445
446     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
447     skip2 = dstx >> 1;
448     wrap = dst->linesize[0];
449     wrap3 = rect->pict.linesize[0];
450     p = rect->pict.data[0];
451     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
452
453     if (dsty & 1) {
454         lum += dstx;
455         cb += skip2;
456         cr += skip2;
457
458         if (dstx & 1) {
459             YUVA_IN(y, u, v, a, p, pal);
460             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
461             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
462             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
463             cb++;
464             cr++;
465             lum++;
466             p += BPP;
467         }
468         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
469             YUVA_IN(y, u, v, a, p, pal);
470             u1 = u;
471             v1 = v;
472             a1 = a;
473             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
474
475             YUVA_IN(y, u, v, a, p + BPP, pal);
476             u1 += u;
477             v1 += v;
478             a1 += a;
479             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
480             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
481             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
482             cb++;
483             cr++;
484             p += 2 * BPP;
485             lum += 2;
486         }
487         if (w) {
488             YUVA_IN(y, u, v, a, p, pal);
489             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
491             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
492             p++;
493             lum++;
494         }
495         p += wrap3 - dstw * BPP;
496         lum += wrap - dstw - dstx;
497         cb += dst->linesize[1] - width2 - skip2;
498         cr += dst->linesize[2] - width2 - skip2;
499     }
500     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
501         lum += dstx;
502         cb += skip2;
503         cr += skip2;
504
505         if (dstx & 1) {
506             YUVA_IN(y, u, v, a, p, pal);
507             u1 = u;
508             v1 = v;
509             a1 = a;
510             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511             p += wrap3;
512             lum += wrap;
513             YUVA_IN(y, u, v, a, p, pal);
514             u1 += u;
515             v1 += v;
516             a1 += a;
517             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
518             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
519             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
520             cb++;
521             cr++;
522             p += -wrap3 + BPP;
523             lum += -wrap + 1;
524         }
525         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
526             YUVA_IN(y, u, v, a, p, pal);
527             u1 = u;
528             v1 = v;
529             a1 = a;
530             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
531
532             YUVA_IN(y, u, v, a, p + BPP, pal);
533             u1 += u;
534             v1 += v;
535             a1 += a;
536             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
537             p += wrap3;
538             lum += wrap;
539
540             YUVA_IN(y, u, v, a, p, pal);
541             u1 += u;
542             v1 += v;
543             a1 += a;
544             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545
546             YUVA_IN(y, u, v, a, p + BPP, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551
552             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
553             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
554
555             cb++;
556             cr++;
557             p += -wrap3 + 2 * BPP;
558             lum += -wrap + 2;
559         }
560         if (w) {
561             YUVA_IN(y, u, v, a, p, pal);
562             u1 = u;
563             v1 = v;
564             a1 = a;
565             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566             p += wrap3;
567             lum += wrap;
568             YUVA_IN(y, u, v, a, p, pal);
569             u1 += u;
570             v1 += v;
571             a1 += a;
572             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
573             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
574             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
575             cb++;
576             cr++;
577             p += -wrap3 + BPP;
578             lum += -wrap + 1;
579         }
580         p += wrap3 + (wrap3 - dstw * BPP);
581         lum += wrap + (wrap - dstw - dstx);
582         cb += dst->linesize[1] - width2 - skip2;
583         cr += dst->linesize[2] - width2 - skip2;
584     }
585     /* handle odd height */
586     if (h) {
587         lum += dstx;
588         cb += skip2;
589         cr += skip2;
590
591         if (dstx & 1) {
592             YUVA_IN(y, u, v, a, p, pal);
593             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
594             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
595             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
596             cb++;
597             cr++;
598             lum++;
599             p += BPP;
600         }
601         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
602             YUVA_IN(y, u, v, a, p, pal);
603             u1 = u;
604             v1 = v;
605             a1 = a;
606             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607
608             YUVA_IN(y, u, v, a, p + BPP, pal);
609             u1 += u;
610             v1 += v;
611             a1 += a;
612             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
613             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
614             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
615             cb++;
616             cr++;
617             p += 2 * BPP;
618             lum += 2;
619         }
620         if (w) {
621             YUVA_IN(y, u, v, a, p, pal);
622             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
624             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
625         }
626     }
627 }
628
629 static void free_subpicture(SubPicture *sp)
630 {
631     avsubtitle_free(&sp->sub);
632 }
633
634 static void video_image_display(VideoState *is)
635 {
636     VideoPicture *vp;
637     SubPicture *sp;
638     AVPicture pict;
639     float aspect_ratio;
640     int width, height, x, y;
641     SDL_Rect rect;
642     int i;
643
644     vp = &is->pictq[is->pictq_rindex];
645     if (vp->bmp) {
646 #if CONFIG_AVFILTER
647          if (vp->picref->video->sample_aspect_ratio.num == 0)
648              aspect_ratio = 0;
649          else
650              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
651 #else
652
653         /* XXX: use variable in the frame */
654         if (is->video_st->sample_aspect_ratio.num)
655             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
656         else if (is->video_st->codec->sample_aspect_ratio.num)
657             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
658         else
659             aspect_ratio = 0;
660 #endif
661         if (aspect_ratio <= 0.0)
662             aspect_ratio = 1.0;
663         aspect_ratio *= (float)vp->width / (float)vp->height;
664
665         if (is->subtitle_st) {
666             if (is->subpq_size > 0) {
667                 sp = &is->subpq[is->subpq_rindex];
668
669                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
670                     SDL_LockYUVOverlay (vp->bmp);
671
672                     pict.data[0] = vp->bmp->pixels[0];
673                     pict.data[1] = vp->bmp->pixels[2];
674                     pict.data[2] = vp->bmp->pixels[1];
675
676                     pict.linesize[0] = vp->bmp->pitches[0];
677                     pict.linesize[1] = vp->bmp->pitches[2];
678                     pict.linesize[2] = vp->bmp->pitches[1];
679
680                     for (i = 0; i < sp->sub.num_rects; i++)
681                         blend_subrect(&pict, sp->sub.rects[i],
682                                       vp->bmp->w, vp->bmp->h);
683
684                     SDL_UnlockYUVOverlay (vp->bmp);
685                 }
686             }
687         }
688
689
690         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
691         height = is->height;
692         width = ((int)rint(height * aspect_ratio)) & ~1;
693         if (width > is->width) {
694             width = is->width;
695             height = ((int)rint(width / aspect_ratio)) & ~1;
696         }
697         x = (is->width - width) / 2;
698         y = (is->height - height) / 2;
699         is->no_background = 0;
700         rect.x = is->xleft + x;
701         rect.y = is->ytop  + y;
702         rect.w = FFMAX(width,  1);
703         rect.h = FFMAX(height, 1);
704         SDL_DisplayYUVOverlay(vp->bmp, &rect);
705     }
706 }
707
708 /* get the current audio output buffer size, in bytes. With SDL, we
709    cannot get precise information (no hardware buffer fullness info) */
710 static int audio_write_get_buf_size(VideoState *is)
711 {
712     return is->audio_buf_size - is->audio_buf_index;
713 }
714
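/* modulo that stays non-negative for negative a, used to wrap indices into
   the circular sample_array */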
715 static inline int compute_mod(int a, int b)
716 {
717     return a < 0 ? a%b + b : a%b;
718 }
719
720 static void video_audio_display(VideoState *s)
721 {
722     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
723     int ch, channels, h, h2, bgcolor, fgcolor;
724     int64_t time_diff;
725     int rdft_bits, nb_freq;
726
727     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
728         ;
729     nb_freq= 1<<(rdft_bits-1);
730
731     /* compute display index : center on currently output samples */
732     channels = s->audio_st->codec->channels;
733     nb_display_channels = channels;
734     if (!s->paused) {
735         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
736         n = 2 * channels;
737         delay = audio_write_get_buf_size(s);
738         delay /= n;
739
740         /* to be more precise, we take into account the time spent since
741            the last buffer computation */
742         if (audio_callback_time) {
743             time_diff = av_gettime() - audio_callback_time;
744             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
745         }
746
747         delay += 2*data_used;
748         if (delay < data_used)
749             delay = data_used;
750
751         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
752         if (s->show_mode == SHOW_MODE_WAVES) {
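            /* oscilloscope-like triggering: look for the steepest falling
               zero crossing among recent samples so the displayed waveform
               stays roughly stable from frame to frame */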
753             h= INT_MIN;
754             for(i=0; i<1000; i+=channels){
755                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
756                 int a= s->sample_array[idx];
757                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
758                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
759                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
760                 int score= a-d;
761                 if(h<score && (b^c)<0){
762                     h= score;
763                     i_start= idx;
764                 }
765             }
766         }
767
768         s->last_i_start = i_start;
769     } else {
770         i_start = s->last_i_start;
771     }
772
773     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
774     if (s->show_mode == SHOW_MODE_WAVES) {
775         fill_rectangle(screen,
776                        s->xleft, s->ytop, s->width, s->height,
777                        bgcolor);
778
779         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
780
781         /* total height for one channel */
782         h = s->height / nb_display_channels;
783         /* graph height / 2 */
784         h2 = (h * 9) / 20;
785         for(ch = 0;ch < nb_display_channels; ch++) {
786             i = i_start + ch;
787             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
788             for(x = 0; x < s->width; x++) {
789                 y = (s->sample_array[i] * h2) >> 15;
790                 if (y < 0) {
791                     y = -y;
792                     ys = y1 - y;
793                 } else {
794                     ys = y1;
795                 }
796                 fill_rectangle(screen,
797                                s->xleft + x, ys, 1, y,
798                                fgcolor);
799                 i += channels;
800                 if (i >= SAMPLE_ARRAY_SIZE)
801                     i -= SAMPLE_ARRAY_SIZE;
802             }
803         }
804
805         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
806
807         for(ch = 1;ch < nb_display_channels; ch++) {
808             y = s->ytop + ch * h;
809             fill_rectangle(screen,
810                            s->xleft, y, s->width, 1,
811                            fgcolor);
812         }
813         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
814     }else{
815         nb_display_channels= FFMIN(nb_display_channels, 2);
816         if(rdft_bits != s->rdft_bits){
817             av_rdft_end(s->rdft);
818             av_free(s->rdft_data);
819             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
820             s->rdft_bits= rdft_bits;
821             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
822         }
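        /* window the samples (1 - w^2, a Welch window) and run a
           real-to-complex FFT for each displayed channel */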
823         {
824             FFTSample *data[2];
825             for(ch = 0;ch < nb_display_channels; ch++) {
826                 data[ch] = s->rdft_data + 2*nb_freq*ch;
827                 i = i_start + ch;
828                 for(x = 0; x < 2*nb_freq; x++) {
829                     double w= (x-nb_freq)*(1.0/nb_freq);
830                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
831                     i += channels;
832                     if (i >= SAMPLE_ARRAY_SIZE)
833                         i -= SAMPLE_ARRAY_SIZE;
834                 }
835                 av_rdft_calc(s->rdft, data[ch]);
836             }
837             //least efficient way to do this; we could of course access the data directly, but it's more than fast enough
838             for(y=0; y<s->height; y++){
839                 double w= 1/sqrt(nb_freq);
840                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
841                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
842                        + data[1][2*y+1]*data[1][2*y+1])) : a;
843                 a= FFMIN(a,255);
844                 b= FFMIN(b,255);
845                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
846
847                 fill_rectangle(screen,
848                             s->xpos, s->height-y, 1, 1,
849                             fgcolor);
850             }
851         }
852         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
853         s->xpos++;
854         if(s->xpos >= s->width)
855             s->xpos= s->xleft;
856     }
857 }
858
859 static void stream_close(VideoState *is)
860 {
861     VideoPicture *vp;
862     int i;
863     /* XXX: use a special url_shutdown call to abort parse cleanly */
864     is->abort_request = 1;
865     SDL_WaitThread(is->read_tid, NULL);
866     SDL_WaitThread(is->refresh_tid, NULL);
867
868     /* free all pictures */
869     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
870         vp = &is->pictq[i];
871 #if CONFIG_AVFILTER
872         if (vp->picref) {
873             avfilter_unref_buffer(vp->picref);
874             vp->picref = NULL;
875         }
876 #endif
877         if (vp->bmp) {
878             SDL_FreeYUVOverlay(vp->bmp);
879             vp->bmp = NULL;
880         }
881     }
882     SDL_DestroyMutex(is->pictq_mutex);
883     SDL_DestroyCond(is->pictq_cond);
884     SDL_DestroyMutex(is->subpq_mutex);
885     SDL_DestroyCond(is->subpq_cond);
886 #if !CONFIG_AVFILTER
887     if (is->img_convert_ctx)
888         sws_freeContext(is->img_convert_ctx);
889 #endif
890     av_free(is);
891 }
892
893 static void do_exit(void)
894 {
895     if (cur_stream) {
896         stream_close(cur_stream);
897         cur_stream = NULL;
898     }
899     uninit_opts();
900 #if CONFIG_AVFILTER
901     avfilter_uninit();
902 #endif
903     if (show_status)
904         printf("\n");
905     SDL_Quit();
906     av_log(NULL, AV_LOG_QUIET, "%s", "");
907     exit(0);
908 }
909
910 static int video_open(VideoState *is){
911     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
912     int w,h;
913
914     if(is_full_screen) flags |= SDL_FULLSCREEN;
915     else               flags |= SDL_RESIZABLE;
916
917     if (is_full_screen && fs_screen_width) {
918         w = fs_screen_width;
919         h = fs_screen_height;
920     } else if(!is_full_screen && screen_width){
921         w = screen_width;
922         h = screen_height;
923 #if CONFIG_AVFILTER
924     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
925         w = is->out_video_filter->inputs[0]->w;
926         h = is->out_video_filter->inputs[0]->h;
927 #else
928     }else if (is->video_st && is->video_st->codec->width){
929         w = is->video_st->codec->width;
930         h = is->video_st->codec->height;
931 #endif
932     } else {
933         w = 640;
934         h = 480;
935     }
936     if(screen && is->width == screen->w && screen->w == w
937        && is->height== screen->h && screen->h == h)
938         return 0;
939
940 #ifndef __APPLE__
941     screen = SDL_SetVideoMode(w, h, 0, flags);
942 #else
943     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
944     screen = SDL_SetVideoMode(w, h, 24, flags);
945 #endif
946     if (!screen) {
947         fprintf(stderr, "SDL: could not set video mode - exiting\n");
948         do_exit();
949     }
950     if (!window_title)
951         window_title = input_filename;
952     SDL_WM_SetCaption(window_title, window_title);
953
954     is->width = screen->w;
955     is->height = screen->h;
956
957     return 0;
958 }
959
960 /* display the current picture, if any */
961 static void video_display(VideoState *is)
962 {
963     if(!screen)
964         video_open(cur_stream);
965     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
966         video_audio_display(is);
967     else if (is->video_st)
968         video_image_display(is);
969 }
970
971 static int refresh_thread(void *opaque)
972 {
973     VideoState *is= opaque;
974     while(!is->abort_request){
975         SDL_Event event;
976         event.type = FF_REFRESH_EVENT;
977         event.user.data1 = opaque;
978         if(!is->refresh){
979             is->refresh=1;
980             SDL_PushEvent(&event);
981         }
982         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
983         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
984     }
985     return 0;
986 }
987
988 /* get the current audio clock value */
989 static double get_audio_clock(VideoState *is)
990 {
991     double pts;
992     int hw_buf_size, bytes_per_sec;
993     pts = is->audio_clock;
994     hw_buf_size = audio_write_get_buf_size(is);
995     bytes_per_sec = 0;
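    /* audio is output as signed 16-bit samples, hence 2 bytes per sample
       and per channel */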
996     if (is->audio_st) {
997         bytes_per_sec = is->audio_st->codec->sample_rate *
998             2 * is->audio_st->codec->channels;
999     }
1000     if (bytes_per_sec)
1001         pts -= (double)hw_buf_size / bytes_per_sec;
1002     return pts;
1003 }
1004
1005 /* get the current video clock value */
1006 static double get_video_clock(VideoState *is)
1007 {
1008     if (is->paused) {
1009         return is->video_current_pts;
1010     } else {
1011         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1012     }
1013 }
1014
1015 /* get the current external clock value */
1016 static double get_external_clock(VideoState *is)
1017 {
1018     int64_t ti;
1019     ti = av_gettime();
1020     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1021 }
1022
1023 /* get the current master clock value */
1024 static double get_master_clock(VideoState *is)
1025 {
1026     double val;
1027
1028     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1029         if (is->video_st)
1030             val = get_video_clock(is);
1031         else
1032             val = get_audio_clock(is);
1033     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1034         if (is->audio_st)
1035             val = get_audio_clock(is);
1036         else
1037             val = get_video_clock(is);
1038     } else {
1039         val = get_external_clock(is);
1040     }
1041     return val;
1042 }
1043
1044 /* seek in the stream */
1045 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1046 {
1047     if (!is->seek_req) {
1048         is->seek_pos = pos;
1049         is->seek_rel = rel;
1050         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1051         if (seek_by_bytes)
1052             is->seek_flags |= AVSEEK_FLAG_BYTE;
1053         is->seek_req = 1;
1054     }
1055 }
1056
1057 /* pause or resume the video */
1058 static void stream_toggle_pause(VideoState *is)
1059 {
1060     if (is->paused) {
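        /* on resume, advance frame_timer by the wall-clock time spent paused
           so the video clock does not try to catch up */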
1061         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1062         if(is->read_pause_return != AVERROR(ENOSYS)){
1063             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1064         }
1065         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1066     }
1067     is->paused = !is->paused;
1068 }
1069
1070 static double compute_target_time(double frame_current_pts, VideoState *is)
1071 {
1072     double delay, sync_threshold, diff = 0;
1073
1074     /* compute nominal delay */
1075     delay = frame_current_pts - is->frame_last_pts;
1076     if (delay <= 0 || delay >= 10.0) {
1077         /* if incorrect delay, use previous one */
1078         delay = is->frame_last_delay;
1079     } else {
1080         is->frame_last_delay = delay;
1081     }
1082     is->frame_last_pts = frame_current_pts;
1083
1084     /* update delay to follow master synchronisation source */
1085     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1086          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1087         /* if video is slave, we try to correct big delays by
1088            duplicating or deleting a frame */
1089         diff = get_video_clock(is) - get_master_clock(is);
1090
1091         /* skip or repeat frame. We take into account the
1092            delay to compute the threshold. I still don't know
1093            if it is the best guess */
1094         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1095         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1096             if (diff <= -sync_threshold)
1097                 delay = 0;
1098             else if (diff >= sync_threshold)
1099                 delay = 2 * delay;
1100         }
1101     }
1102     is->frame_timer += delay;
1103
1104     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1105             delay, frame_current_pts, -diff);
1106
1107     return is->frame_timer;
1108 }
1109
1110 /* called to display each frame */
1111 static void video_refresh(void *opaque)
1112 {
1113     VideoState *is = opaque;
1114     VideoPicture *vp;
1115
1116     SubPicture *sp, *sp2;
1117
1118     if (is->video_st) {
1119 retry:
1120         if (is->pictq_size == 0) {
1121             //nothing to do, no picture to display in the queue
1122         } else {
1123             double time= av_gettime()/1000000.0;
1124             double next_target;
1125             /* dequeue the picture */
1126             vp = &is->pictq[is->pictq_rindex];
1127
1128             if(time < vp->target_clock)
1129                 return;
1130             /* update current video pts */
1131             is->video_current_pts = vp->pts;
1132             is->video_current_pts_drift = is->video_current_pts - time;
1133             is->video_current_pos = vp->pos;
1134             if(is->pictq_size > 1){
1135                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1136                 assert(nextvp->target_clock >= vp->target_clock);
1137                 next_target= nextvp->target_clock;
1138             }else{
1139                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1140             }
1141             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1142                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1143                 if(is->pictq_size > 1 || time > next_target + 0.5){
1144                     /* update queue size and signal for next picture */
1145                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1146                         is->pictq_rindex = 0;
1147
1148                     SDL_LockMutex(is->pictq_mutex);
1149                     is->pictq_size--;
1150                     SDL_CondSignal(is->pictq_cond);
1151                     SDL_UnlockMutex(is->pictq_mutex);
1152                     goto retry;
1153                 }
1154             }
1155
1156             if(is->subtitle_st) {
1157                 if (is->subtitle_stream_changed) {
1158                     SDL_LockMutex(is->subpq_mutex);
1159
1160                     while (is->subpq_size) {
1161                         free_subpicture(&is->subpq[is->subpq_rindex]);
1162
1163                         /* update queue size and signal for next picture */
1164                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1165                             is->subpq_rindex = 0;
1166
1167                         is->subpq_size--;
1168                     }
1169                     is->subtitle_stream_changed = 0;
1170
1171                     SDL_CondSignal(is->subpq_cond);
1172                     SDL_UnlockMutex(is->subpq_mutex);
1173                 } else {
1174                     if (is->subpq_size > 0) {
1175                         sp = &is->subpq[is->subpq_rindex];
1176
1177                         if (is->subpq_size > 1)
1178                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1179                         else
1180                             sp2 = NULL;
1181
1182                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1183                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1184                         {
1185                             free_subpicture(sp);
1186
1187                             /* update queue size and signal for next picture */
1188                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1189                                 is->subpq_rindex = 0;
1190
1191                             SDL_LockMutex(is->subpq_mutex);
1192                             is->subpq_size--;
1193                             SDL_CondSignal(is->subpq_cond);
1194                             SDL_UnlockMutex(is->subpq_mutex);
1195                         }
1196                     }
1197                 }
1198             }
1199
1200             /* display picture */
1201             if (!display_disable)
1202                 video_display(is);
1203
1204             /* update queue size and signal for next picture */
1205             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1206                 is->pictq_rindex = 0;
1207
1208             SDL_LockMutex(is->pictq_mutex);
1209             is->pictq_size--;
1210             SDL_CondSignal(is->pictq_cond);
1211             SDL_UnlockMutex(is->pictq_mutex);
1212         }
1213     } else if (is->audio_st) {
1214         /* draw the next audio frame */
1215
1216         /* if there is only an audio stream, then display the audio bars (better
1217            than nothing, just to test the implementation) */
1218
1219         /* display picture */
1220         if (!display_disable)
1221             video_display(is);
1222     }
1223     if (show_status) {
1224         static int64_t last_time;
1225         int64_t cur_time;
1226         int aqsize, vqsize, sqsize;
1227         double av_diff;
1228
1229         cur_time = av_gettime();
1230         if (!last_time || (cur_time - last_time) >= 30000) {
1231             aqsize = 0;
1232             vqsize = 0;
1233             sqsize = 0;
1234             if (is->audio_st)
1235                 aqsize = is->audioq.size;
1236             if (is->video_st)
1237                 vqsize = is->videoq.size;
1238             if (is->subtitle_st)
1239                 sqsize = is->subtitleq.size;
1240             av_diff = 0;
1241             if (is->audio_st && is->video_st)
1242                 av_diff = get_audio_clock(is) - get_video_clock(is);
1243             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1244                    get_master_clock(is),
1245                    av_diff,
1246                    FFMAX(is->skip_frames-1, 0),
1247                    aqsize / 1024,
1248                    vqsize / 1024,
1249                    sqsize,
1250                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1251                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1252             fflush(stdout);
1253             last_time = cur_time;
1254         }
1255     }
1256 }
1257
1258 /* allocate a picture (this needs to be done in the main thread to avoid
1259    potential locking problems) */
1260 static void alloc_picture(void *opaque)
1261 {
1262     VideoState *is = opaque;
1263     VideoPicture *vp;
1264
1265     vp = &is->pictq[is->pictq_windex];
1266
1267     if (vp->bmp)
1268         SDL_FreeYUVOverlay(vp->bmp);
1269
1270 #if CONFIG_AVFILTER
1271     if (vp->picref)
1272         avfilter_unref_buffer(vp->picref);
1273     vp->picref = NULL;
1274
1275     vp->width   = is->out_video_filter->inputs[0]->w;
1276     vp->height  = is->out_video_filter->inputs[0]->h;
1277     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1278 #else
1279     vp->width   = is->video_st->codec->width;
1280     vp->height  = is->video_st->codec->height;
1281     vp->pix_fmt = is->video_st->codec->pix_fmt;
1282 #endif
1283
1284     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1285                                    SDL_YV12_OVERLAY,
1286                                    screen);
1287     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1288         /* SDL allocates a buffer smaller than requested if the video
1289          * overlay hardware is unable to support the requested size. */
1290         fprintf(stderr, "Error: the video system does not support an image\n"
1291                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1292                         "to reduce the image size.\n", vp->width, vp->height );
1293         do_exit();
1294     }
1295
1296     SDL_LockMutex(is->pictq_mutex);
1297     vp->allocated = 1;
1298     SDL_CondSignal(is->pictq_cond);
1299     SDL_UnlockMutex(is->pictq_mutex);
1300 }
1301
1302 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1303 {
1304     VideoPicture *vp;
1305     double frame_delay, pts = pts1;
1306
1307     /* compute the exact PTS for the picture if it is omitted in the stream
1308      * pts1 is the dts of the pkt / pts of the frame */
1309     if (pts != 0) {
1310         /* update video clock with pts, if present */
1311         is->video_clock = pts;
1312     } else {
1313         pts = is->video_clock;
1314     }
1315     /* update video clock for next frame */
1316     frame_delay = av_q2d(is->video_st->codec->time_base);
1317     /* for MPEG2, the frame can be repeated, so we update the
1318        clock accordingly */
1319     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320     is->video_clock += frame_delay;
1321
1322 #if defined(DEBUG_SYNC) && 0
1323     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1324            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1325 #endif
1326
1327     /* wait until we have space to put a new picture */
1328     SDL_LockMutex(is->pictq_mutex);
1329
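    /* the picture queue is full and no refresh is pending: decoding is ahead
       of display, so decay the frame-skip ratio */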
1330     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1331         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1332
1333     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1334            !is->videoq.abort_request) {
1335         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1336     }
1337     SDL_UnlockMutex(is->pictq_mutex);
1338
1339     if (is->videoq.abort_request)
1340         return -1;
1341
1342     vp = &is->pictq[is->pictq_windex];
1343
1344     /* alloc or resize hardware picture buffer */
1345     if (!vp->bmp ||
1346 #if CONFIG_AVFILTER
1347         vp->width  != is->out_video_filter->inputs[0]->w ||
1348         vp->height != is->out_video_filter->inputs[0]->h) {
1349 #else
1350         vp->width != is->video_st->codec->width ||
1351         vp->height != is->video_st->codec->height) {
1352 #endif
1353         SDL_Event event;
1354
1355         vp->allocated = 0;
1356
1357         /* the allocation must be done in the main thread to avoid
1358            locking problems */
1359         event.type = FF_ALLOC_EVENT;
1360         event.user.data1 = is;
1361         SDL_PushEvent(&event);
1362
1363         /* wait until the picture is allocated */
1364         SDL_LockMutex(is->pictq_mutex);
1365         while (!vp->allocated && !is->videoq.abort_request) {
1366             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1367         }
1368         SDL_UnlockMutex(is->pictq_mutex);
1369
1370         if (is->videoq.abort_request)
1371             return -1;
1372     }
1373
1374     /* if the frame is not skipped, then display it */
1375     if (vp->bmp) {
1376         AVPicture pict;
1377 #if CONFIG_AVFILTER
1378         if(vp->picref)
1379             avfilter_unref_buffer(vp->picref);
1380         vp->picref = src_frame->opaque;
1381 #endif
1382
1383         /* get a pointer to the bitmap */
1384         SDL_LockYUVOverlay (vp->bmp);
1385
1386         memset(&pict,0,sizeof(AVPicture));
1387         pict.data[0] = vp->bmp->pixels[0];
1388         pict.data[1] = vp->bmp->pixels[2];
1389         pict.data[2] = vp->bmp->pixels[1];
1390
1391         pict.linesize[0] = vp->bmp->pitches[0];
1392         pict.linesize[1] = vp->bmp->pitches[2];
1393         pict.linesize[2] = vp->bmp->pitches[1];
1394
1395 #if CONFIG_AVFILTER
1396         //FIXME use direct rendering
1397         av_picture_copy(&pict, (AVPicture *)src_frame,
1398                         vp->pix_fmt, vp->width, vp->height);
1399 #else
1400         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1401         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1402             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1403             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1404         if (is->img_convert_ctx == NULL) {
1405             fprintf(stderr, "Cannot initialize the conversion context\n");
1406             exit(1);
1407         }
1408         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1409                   0, vp->height, pict.data, pict.linesize);
1410 #endif
1411         /* update the bitmap content */
1412         SDL_UnlockYUVOverlay(vp->bmp);
1413
1414         vp->pts = pts;
1415         vp->pos = pos;
1416
1417         /* now we can update the picture count */
1418         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1419             is->pictq_windex = 0;
1420         SDL_LockMutex(is->pictq_mutex);
1421         vp->target_clock= compute_target_time(vp->pts, is);
1422
1423         is->pictq_size++;
1424         SDL_UnlockMutex(is->pictq_mutex);
1425     }
1426     return 0;
1427 }
1428
1429 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1430 {
1431     int got_picture, i;
1432
1433     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1434         return -1;
1435
1436     if (pkt->data == flush_pkt.data) {
1437         avcodec_flush_buffers(is->video_st->codec);
1438
1439         SDL_LockMutex(is->pictq_mutex);
1440         //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1441         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1442             is->pictq[i].target_clock= 0;
1443         }
1444         while (is->pictq_size && !is->videoq.abort_request) {
1445             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1446         }
1447         is->video_current_pos = -1;
1448         SDL_UnlockMutex(is->pictq_mutex);
1449
1450         is->frame_last_pts = AV_NOPTS_VALUE;
1451         is->frame_last_delay = 0;
1452         is->frame_timer = (double)av_gettime() / 1000000.0;
1453         is->skip_frames = 1;
1454         is->skip_frames_index = 0;
1455         return 0;
1456     }
1457
1458     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1459
1460     if (got_picture) {
1461         if (decoder_reorder_pts == -1) {
1462             *pts = frame->best_effort_timestamp;
1463         } else if (decoder_reorder_pts) {
1464             *pts = frame->pkt_pts;
1465         } else {
1466             *pts = frame->pkt_dts;
1467         }
1468
1469         if (*pts == AV_NOPTS_VALUE) {
1470             *pts = 0;
1471         }
1472
1473         is->skip_frames_index += 1;
1474         if(is->skip_frames_index >= is->skip_frames){
1475             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1476             return 1;
1477         }
1478
1479     }
1480     return 0;
1481 }
1482
1483 #if CONFIG_AVFILTER
1484 typedef struct {
1485     VideoState *is;
1486     AVFrame *frame;
1487     int use_dr1;
1488 } FilterPriv;
1489
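/* get_buffer() callback handing the decoder buffers allocated by the filter
   graph, so decoded frames can enter libavfilter without an extra copy
   ("direct rendering", DR1) */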
1490 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1491 {
1492     AVFilterContext *ctx = codec->opaque;
1493     AVFilterBufferRef  *ref;
1494     int perms = AV_PERM_WRITE;
1495     int i, w, h, stride[4];
1496     unsigned edge;
1497     int pixel_size;
1498
1499     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1500
1501     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1502         perms |= AV_PERM_NEG_LINESIZES;
1503
1504     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1505         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1506         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1507         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1508     }
1509     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1510
1511     w = codec->width;
1512     h = codec->height;
1513
1514     if(av_image_check_size(w, h, 0, codec))
1515         return -1;
1516
1517     avcodec_align_dimensions2(codec, &w, &h, stride);
1518     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1519     w += edge << 1;
1520     h += edge << 1;
1521
1522     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1523         return -1;
1524
1525     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1526     ref->video->w = codec->width;
1527     ref->video->h = codec->height;
1528     for(i = 0; i < 4; i ++) {
1529         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1530         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1531
1532         if (ref->data[i]) {
1533             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1534         }
1535         pic->data[i]     = ref->data[i];
1536         pic->linesize[i] = ref->linesize[i];
1537     }
1538     pic->opaque = ref;
1539     pic->age    = INT_MAX;
1540     pic->type   = FF_BUFFER_TYPE_USER;
1541     pic->reordered_opaque = codec->reordered_opaque;
1542     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1543     else           pic->pkt_pts = AV_NOPTS_VALUE;
1544     return 0;
1545 }
1546
1547 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1548 {
1549     memset(pic->data, 0, sizeof(pic->data));
1550     avfilter_unref_buffer(pic->opaque);
1551 }
1552
1553 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1554 {
1555     AVFilterBufferRef *ref = pic->opaque;
1556
1557     if (pic->data[0] == NULL) {
1558         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1559         return codec->get_buffer(codec, pic);
1560     }
1561
1562     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1563         (codec->pix_fmt != ref->format)) {
1564         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1565         return -1;
1566     }
1567
1568     pic->reordered_opaque = codec->reordered_opaque;
1569     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1570     else           pic->pkt_pts = AV_NOPTS_VALUE;
1571     return 0;
1572 }
1573
1574 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1575 {
1576     FilterPriv *priv = ctx->priv;
1577     AVCodecContext *codec;
1578     if(!opaque) return -1;
1579
1580     priv->is = opaque;
1581     codec    = priv->is->video_st->codec;
1582     codec->opaque = ctx;
1583     if((codec->codec->capabilities & CODEC_CAP_DR1)
1584     ) {
1585         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1586         priv->use_dr1 = 1;
1587         codec->get_buffer     = input_get_buffer;
1588         codec->release_buffer = input_release_buffer;
1589         codec->reget_buffer   = input_reget_buffer;
1590         codec->thread_safe_callbacks = 1;
1591     }
1592
1593     priv->frame = avcodec_alloc_frame();
1594
1595     return 0;
1596 }
1597
1598 static void input_uninit(AVFilterContext *ctx)
1599 {
1600     FilterPriv *priv = ctx->priv;
1601     av_free(priv->frame);
1602 }
1603
1604 static int input_request_frame(AVFilterLink *link)
1605 {
1606     FilterPriv *priv = link->src->priv;
1607     AVFilterBufferRef *picref;
1608     int64_t pts = 0;
1609     AVPacket pkt;
1610     int ret;
1611
1612     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1613         av_free_packet(&pkt);
1614     if (ret < 0)
1615         return -1;
1616
1617     if(priv->use_dr1 && priv->frame->opaque) {
1618         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1619     } else {
1620         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1621         av_image_copy(picref->data, picref->linesize,
1622                       priv->frame->data, priv->frame->linesize,
1623                       picref->format, link->w, link->h);
1624     }
1625     av_free_packet(&pkt);
1626
1627     avfilter_copy_frame_props(picref, priv->frame);
1628     picref->pts = pts;
1629
1630     avfilter_start_frame(link, picref);
1631     avfilter_draw_slice(link, 0, link->h, 1);
1632     avfilter_end_frame(link);
1633
1634     return 0;
1635 }
1636
1637 static int input_query_formats(AVFilterContext *ctx)
1638 {
1639     FilterPriv *priv = ctx->priv;
1640     enum PixelFormat pix_fmts[] = {
1641         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1642     };
1643
1644     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1645     return 0;
1646 }
1647
1648 static int input_config_props(AVFilterLink *link)
1649 {
1650     FilterPriv *priv  = link->src->priv;
1651     AVCodecContext *c = priv->is->video_st->codec;
1652
1653     link->w = c->width;
1654     link->h = c->height;
1655     link->sample_aspect_ratio = priv->is->video_st->sample_aspect_ratio;
1656     link->time_base = priv->is->video_st->time_base;
1657
1658     return 0;
1659 }
1660
1661 static AVFilter input_filter =
1662 {
1663     .name      = "ffplay_input",
1664
1665     .priv_size = sizeof(FilterPriv),
1666
1667     .init      = input_init,
1668     .uninit    = input_uninit,
1669
1670     .query_formats = input_query_formats,
1671
1672     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1673     .outputs   = (AVFilterPad[]) {{ .name = "default",
1674                                     .type = AVMEDIA_TYPE_VIDEO,
1675                                     .request_frame = input_request_frame,
1676                                     .config_props  = input_config_props, },
1677                                   { .name = NULL }},
1678 };
1679
1680 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1681 {
1682     char sws_flags_str[128];
1683     int ret;
1684     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1685     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1686     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1687     graph->scale_sws_opts = av_strdup(sws_flags_str);
1688
1689     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1690                                             NULL, is, graph)) < 0)
1691         return ret;
1692     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1693                                             NULL, pix_fmts, graph)) < 0)
1694         return ret;
1695
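    /* if the user supplied a filter chain, splice it between our source
       filter (labelled "in") and the buffer sink (labelled "out");
       otherwise connect the two directly */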
1696     if(vfilters) {
1697         AVFilterInOut *outputs = avfilter_inout_alloc();
1698         AVFilterInOut *inputs  = avfilter_inout_alloc();
1699
1700         outputs->name    = av_strdup("in");
1701         outputs->filter_ctx = filt_src;
1702         outputs->pad_idx = 0;
1703         outputs->next    = NULL;
1704
1705         inputs->name    = av_strdup("out");
1706         inputs->filter_ctx = filt_out;
1707         inputs->pad_idx = 0;
1708         inputs->next    = NULL;
1709
1710         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1711             return ret;
1712         av_freep(&vfilters);
1713     } else {
1714         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1715             return ret;
1716     }
1717
1718     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1719         return ret;
1720
1721     is->out_video_filter = filt_out;
1722
1723     return ret;
1724 }
1725
1726 #endif  /* CONFIG_AVFILTER */
1727
1728 static int video_thread(void *arg)
1729 {
1730     VideoState *is = arg;
1731     AVFrame *frame= avcodec_alloc_frame();
1732     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1733     double pts;
1734     int ret;
1735
1736 #if CONFIG_AVFILTER
1737     AVFilterGraph *graph = avfilter_graph_alloc();
1738     AVFilterContext *filt_out = NULL;
1739
1740     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1741         goto the_end;
1742     filt_out = is->out_video_filter;
1743 #endif
1744
1745     for(;;) {
1746 #if !CONFIG_AVFILTER
1747         AVPacket pkt;
1748 #else
1749         AVFilterBufferRef *picref;
1750         AVRational tb = filt_out->inputs[0]->time_base;
1751 #endif
1752         while (is->paused && !is->videoq.abort_request)
1753             SDL_Delay(10);
1754 #if CONFIG_AVFILTER
1755         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1756         if (picref) {
1757             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1758             pts_int = picref->pts;
1759             pos     = picref->pos;
1760             frame->opaque = picref;
1761         }
1762
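        /* the filter output may use a different time base than the video
           stream; rescale the pts so the rest of the player can keep
           working in the stream time base */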
1763         if (av_cmp_q(tb, is->video_st->time_base)) {
1764             av_unused int64_t pts1 = pts_int;
1765             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1766             av_dlog(NULL, "video_thread(): "
1767                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1768                     tb.num, tb.den, pts1,
1769                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1770         }
1771 #else
1772         ret = get_video_frame(is, frame, &pts_int, &pkt);
1773         pos = pkt.pos;
1774         av_free_packet(&pkt);
1775 #endif
1776
1777         if (ret < 0) goto the_end;
1778
#if CONFIG_AVFILTER
1779         if (!picref)
1780             continue;
#endif
1781
1782         pts = pts_int*av_q2d(is->video_st->time_base);
1783
1784         ret = queue_picture(is, frame, pts, pos);
1785
1786         if (ret < 0)
1787             goto the_end;
1788
1789         if (step)
1790             if (cur_stream)
1791                 stream_toggle_pause(cur_stream);
1792     }
1793  the_end:
1794 #if CONFIG_AVFILTER
1795     avfilter_graph_free(&graph);
1796 #endif
1797     av_free(frame);
1798     return 0;
1799 }
1800
1801 static int subtitle_thread(void *arg)
1802 {
1803     VideoState *is = arg;
1804     SubPicture *sp;
1805     AVPacket pkt1, *pkt = &pkt1;
1806     int got_subtitle;
1807     double pts;
1808     int i, j;
1809     int r, g, b, y, u, v, a;
1810
1811     for(;;) {
1812         while (is->paused && !is->subtitleq.abort_request) {
1813             SDL_Delay(10);
1814         }
1815         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1816             break;
1817
1818         if(pkt->data == flush_pkt.data){
1819             avcodec_flush_buffers(is->subtitle_st->codec);
1820             continue;
1821         }
1822         SDL_LockMutex(is->subpq_mutex);
1823         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1824                !is->subtitleq.abort_request) {
1825             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1826         }
1827         SDL_UnlockMutex(is->subpq_mutex);
1828
1829         if (is->subtitleq.abort_request)
1830             return 0;
1831
1832         sp = &is->subpq[is->subpq_windex];
1833
1834         /* NOTE: pts is the PTS of the _first_ picture beginning in
1835            this packet, if any */
1836         pts = 0;
1837         if (pkt->pts != AV_NOPTS_VALUE)
1838             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1839
1840         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1841                                  &got_subtitle, pkt);
1842
1843         if (got_subtitle && sp->sub.format == 0) {
1844             sp->pts = pts;
1845
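            /* convert the subtitle palette in place from RGBA to
               CCIR-range YUVA so it can later be blended onto the video */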
1846             for (i = 0; i < sp->sub.num_rects; i++)
1847             {
1848                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1849                 {
1850                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1851                     y = RGB_TO_Y_CCIR(r, g, b);
1852                     u = RGB_TO_U_CCIR(r, g, b, 0);
1853                     v = RGB_TO_V_CCIR(r, g, b, 0);
1854                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1855                 }
1856             }
1857
1858             /* now we can update the picture count */
1859             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1860                 is->subpq_windex = 0;
1861             SDL_LockMutex(is->subpq_mutex);
1862             is->subpq_size++;
1863             SDL_UnlockMutex(is->subpq_mutex);
1864         }
1865         av_free_packet(pkt);
1866     }
1867     return 0;
1868 }
1869
1870 /* copy samples into the ring buffer used by the audio visualization (waveform/RDFT display) */
1871 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1872 {
1873     int size, len;
1874
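    /* append the samples to the sample_array ring buffer, wrapping the
       write index at SAMPLE_ARRAY_SIZE */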
1875     size = samples_size / sizeof(short);
1876     while (size > 0) {
1877         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1878         if (len > size)
1879             len = size;
1880         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1881         samples += len;
1882         is->sample_array_index += len;
1883         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1884             is->sample_array_index = 0;
1885         size -= len;
1886     }
1887 }
1888
1889 /* return the new audio buffer size (samples can be added or removed
1890    to improve sync when video or the external clock is the master) */
1891 static int synchronize_audio(VideoState *is, short *samples,
1892                              int samples_size1, double pts)
1893 {
1894     int n, samples_size;
1895     double ref_clock;
1896
1897     n = 2 * is->audio_st->codec->channels;
1898     samples_size = samples_size1;
1899
1900     /* if not master, then we try to remove or add samples to correct the clock */
1901     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1902          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1903         double diff, avg_diff;
1904         int wanted_size, min_size, max_size, nb_samples;
1905
1906         ref_clock = get_master_clock(is);
1907         diff = get_audio_clock(is) - ref_clock;
1908
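        /* unless the error is hopelessly large, accumulate it into an
           exponentially weighted sum; multiplying by
           (1 - audio_diff_avg_coef) below recovers the weighted average */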
1909         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1910             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1911             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1912                 /* not enough measurements yet for a reliable estimate */
1913                 is->audio_diff_avg_count++;
1914             } else {
1915                 /* estimate the A-V difference */
1916                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1917
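                /* convert the clock error into a byte count (seconds *
                   sample_rate gives samples, * n gives bytes) and clamp it
                   to +/- SAMPLE_CORRECTION_PERCENT_MAX percent */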
1918                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1919                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1920                     nb_samples = samples_size / n;
1921
1922                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1923                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1924                     if (wanted_size < min_size)
1925                         wanted_size = min_size;
1926                     else if (wanted_size > max_size)
1927                         wanted_size = max_size;
1928
1929                     /* add or remove samples to correct the synchronization */
1930                     if (wanted_size < samples_size) {
1931                         /* remove samples */
1932                         samples_size = wanted_size;
1933                     } else if (wanted_size > samples_size) {
1934                         uint8_t *samples_end, *q;
1935                         int nb;
1936
1937                         /* add samples */
1938                         nb = (wanted_size - samples_size);
1939                         samples_end = (uint8_t *)samples + samples_size - n;
1940                         q = samples_end + n;
1941                         while (nb > 0) {
1942                             memcpy(q, samples_end, n);
1943                             q += n;
1944                             nb -= n;
1945                         }
1946                         samples_size = wanted_size;
1947                     }
1948                 }
1949 #if 0
1950                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1951                        diff, avg_diff, samples_size - samples_size1,
1952                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1953 #endif
1954             }
1955         } else {
1956             /* difference is too large: probably initial PTS errors, so
1957                reset the A-V filter */
1958             is->audio_diff_avg_count = 0;
1959             is->audio_diff_cum = 0;
1960         }
1961     }
1962
1963     return samples_size;
1964 }
1965
1966 /* decode one audio frame and return its uncompressed size */
1967 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1968 {
1969     AVPacket *pkt_temp = &is->audio_pkt_temp;
1970     AVPacket *pkt = &is->audio_pkt;
1971     AVCodecContext *dec= is->audio_st->codec;
1972     int n, len1, data_size;
1973     double pts;
1974
1975     for(;;) {
1976         /* NOTE: the audio packet can contain several frames */
1977         while (pkt_temp->size > 0) {
1978             data_size = sizeof(is->audio_buf1);
1979             len1 = avcodec_decode_audio3(dec,
1980                                         (int16_t *)is->audio_buf1, &data_size,
1981                                         pkt_temp);
1982             if (len1 < 0) {
1983                 /* if error, we skip the frame */
1984                 pkt_temp->size = 0;
1985                 break;
1986             }
1987
1988             pkt_temp->data += len1;
1989             pkt_temp->size -= len1;
1990             if (data_size <= 0)
1991                 continue;
1992
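            /* the rest of the player expects signed 16-bit samples, so
               (re)create a converter whenever the decoder output format
               is not S16 or has changed */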
1993             if (dec->sample_fmt != is->audio_src_fmt) {
1994                 if (is->reformat_ctx)
1995                     av_audio_convert_free(is->reformat_ctx);
1996                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
1997                                                          dec->sample_fmt, 1, NULL, 0);
1998                 if (!is->reformat_ctx) {
1999                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2000                         av_get_sample_fmt_name(dec->sample_fmt),
2001                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2002                         break;
2003                 }
2004                 is->audio_src_fmt= dec->sample_fmt;
2005             }
2006
2007             if (is->reformat_ctx) {
2008                 const void *ibuf[6]= {is->audio_buf1};
2009                 void *obuf[6]= {is->audio_buf2};
2010                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2011                 int ostride[6]= {2};
2012                 int len= data_size/istride[0];
2013                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2014                     printf("av_audio_convert() failed\n");
2015                     break;
2016                 }
2017                 is->audio_buf= is->audio_buf2;
2018                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2019                           remove this legacy cruft */
2020                 data_size= len*2;
2021             }else{
2022                 is->audio_buf= is->audio_buf1;
2023             }
2024
2025             /* if no pts, then compute it */
2026             pts = is->audio_clock;
2027             *pts_ptr = pts;
2028             n = 2 * dec->channels;
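            /* advance the audio clock by the duration of the decoded data:
               data_size bytes / (bytes per sample frame * sample rate) */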
2029             is->audio_clock += (double)data_size /
2030                 (double)(n * dec->sample_rate);
2031 #ifdef DEBUG
2032             {
2033                 static double last_clock;
2034                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2035                        is->audio_clock - last_clock,
2036                        is->audio_clock, pts);
2037                 last_clock = is->audio_clock;
2038             }
2039 #endif
2040             return data_size;
2041         }
2042
2043         /* free the current packet */
2044         if (pkt->data)
2045             av_free_packet(pkt);
2046
2047         if (is->paused || is->audioq.abort_request) {
2048             return -1;
2049         }
2050
2051         /* read next packet */
2052         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2053             return -1;
2054         if(pkt->data == flush_pkt.data){
2055             avcodec_flush_buffers(dec);
2056             continue;
2057         }
2058
2059         pkt_temp->data = pkt->data;
2060         pkt_temp->size = pkt->size;
2061
2062         /* update the audio clock with the packet pts, if available */
2063         if (pkt->pts != AV_NOPTS_VALUE) {
2064             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2065         }
2066     }
2067 }
2068
2069 /* prepare a new audio buffer */
2070 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2071 {
2072     VideoState *is = opaque;
2073     int audio_size, len1;
2074     double pts;
2075
2076     audio_callback_time = av_gettime();
2077
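    /* fill the 'len' bytes requested by SDL, decoding more audio as needed;
       on a decoding error a short block of silence is emitted so SDL is
       never left without data */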
2078     while (len > 0) {
2079         if (is->audio_buf_index >= is->audio_buf_size) {
2080            audio_size = audio_decode_frame(is, &pts);
2081            if (audio_size < 0) {
2082                 /* if error, just output silence */
2083                is->audio_buf = is->audio_buf1;
2084                is->audio_buf_size = 1024;
2085                memset(is->audio_buf, 0, is->audio_buf_size);
2086            } else {
2087                if (is->show_mode != SHOW_MODE_VIDEO)
2088                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2089                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2090                                               pts);
2091                is->audio_buf_size = audio_size;
2092            }
2093            is->audio_buf_index = 0;
2094         }
2095         len1 = is->audio_buf_size - is->audio_buf_index;
2096         if (len1 > len)
2097             len1 = len;
2098         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2099         len -= len1;
2100         stream += len1;
2101         is->audio_buf_index += len1;
2102     }
2103 }
2104
2105 /* open a given stream. Return 0 if OK */
2106 static int stream_component_open(VideoState *is, int stream_index)
2107 {
2108     AVFormatContext *ic = is->ic;
2109     AVCodecContext *avctx;
2110     AVCodec *codec;
2111     SDL_AudioSpec wanted_spec, spec;
2112     AVDictionary *opts;
2113     AVDictionaryEntry *t = NULL;
2114
2115     if (stream_index < 0 || stream_index >= ic->nb_streams)
2116         return -1;
2117     avctx = ic->streams[stream_index]->codec;
2118
2119     opts = filter_codec_opts(codec_opts, avctx->codec_id, 0);
2120
2121     /* request at most two decoded audio channels (stereo downmix) */
2122     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2123         if (avctx->channels > 0) {
2124             avctx->request_channels = FFMIN(2, avctx->channels);
2125         } else {
2126             avctx->request_channels = 2;
2127         }
2128     }
2129
2130     codec = avcodec_find_decoder(avctx->codec_id);
2131     if (!codec)
2132         return -1;
2133
2134     avctx->workaround_bugs = workaround_bugs;
2135     avctx->lowres = lowres;
2136     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2137     avctx->idct_algo= idct;
2138     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2139     avctx->skip_frame= skip_frame;
2140     avctx->skip_idct= skip_idct;
2141     avctx->skip_loop_filter= skip_loop_filter;
2142     avctx->error_recognition= error_recognition;
2143     avctx->error_concealment= error_concealment;
2144     avctx->thread_count= thread_count;
2145
2146     if(codec->capabilities & CODEC_CAP_DR1)
2147         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2148
2149     if (!codec ||
2150         avcodec_open2(avctx, codec, &opts) < 0)
2151         return -1;
2152     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2153         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2154         return AVERROR_OPTION_NOT_FOUND;
2155     }
2156
2157     /* prepare audio output */
2158     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2159         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2160             fprintf(stderr, "Invalid sample rate or channel count\n");
2161             return -1;
2162         }
2163         wanted_spec.freq = avctx->sample_rate;
2164         wanted_spec.format = AUDIO_S16SYS;
2165         wanted_spec.channels = avctx->channels;
2166         wanted_spec.silence = 0;
2167         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2168         wanted_spec.callback = sdl_audio_callback;
2169         wanted_spec.userdata = is;
2170         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2171             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2172             return -1;
2173         }
2174         is->audio_hw_buf_size = spec.size;
2175         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2176     }
2177
2178     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2179     switch(avctx->codec_type) {
2180     case AVMEDIA_TYPE_AUDIO:
2181         is->audio_stream = stream_index;
2182         is->audio_st = ic->streams[stream_index];
2183         is->audio_buf_size = 0;
2184         is->audio_buf_index = 0;
2185
2186         /* init averaging filter */
2187         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2188         is->audio_diff_avg_count = 0;
2189         /* since we do not have precise enough audio FIFO fullness information,
2190            we correct audio sync only if the error is larger than this threshold */
2191         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2192
2193         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2194         packet_queue_init(&is->audioq);
2195         SDL_PauseAudio(0);
2196         break;
2197     case AVMEDIA_TYPE_VIDEO:
2198         is->video_stream = stream_index;
2199         is->video_st = ic->streams[stream_index];
2200
2201         packet_queue_init(&is->videoq);
2202         is->video_tid = SDL_CreateThread(video_thread, is);
2203         break;
2204     case AVMEDIA_TYPE_SUBTITLE:
2205         is->subtitle_stream = stream_index;
2206         is->subtitle_st = ic->streams[stream_index];
2207         packet_queue_init(&is->subtitleq);
2208
2209         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2210         break;
2211     default:
2212         break;
2213     }
2214     return 0;
2215 }
2216
2217 static void stream_component_close(VideoState *is, int stream_index)
2218 {
2219     AVFormatContext *ic = is->ic;
2220     AVCodecContext *avctx;
2221
2222     if (stream_index < 0 || stream_index >= ic->nb_streams)
2223         return;
2224     avctx = ic->streams[stream_index]->codec;
2225
2226     switch(avctx->codec_type) {
2227     case AVMEDIA_TYPE_AUDIO:
2228         packet_queue_abort(&is->audioq);
2229
2230         SDL_CloseAudio();
2231
2232         packet_queue_end(&is->audioq);
2233         if (is->reformat_ctx)
2234             av_audio_convert_free(is->reformat_ctx);
2235         is->reformat_ctx = NULL;
2236         break;
2237     case AVMEDIA_TYPE_VIDEO:
2238         packet_queue_abort(&is->videoq);
2239
2240         /* note: we also signal this mutex to make sure we unblock the
2241            video thread in all cases */
2242         SDL_LockMutex(is->pictq_mutex);
2243         SDL_CondSignal(is->pictq_cond);
2244         SDL_UnlockMutex(is->pictq_mutex);
2245
2246         SDL_WaitThread(is->video_tid, NULL);
2247
2248         packet_queue_end(&is->videoq);
2249         break;
2250     case AVMEDIA_TYPE_SUBTITLE:
2251         packet_queue_abort(&is->subtitleq);
2252
2253         /* note: we also signal this mutex to make sure we unblock the
2254            subtitle thread in all cases */
2255         SDL_LockMutex(is->subpq_mutex);
2256         is->subtitle_stream_changed = 1;
2257
2258         SDL_CondSignal(is->subpq_cond);
2259         SDL_UnlockMutex(is->subpq_mutex);
2260
2261         SDL_WaitThread(is->subtitle_tid, NULL);
2262
2263         packet_queue_end(&is->subtitleq);
2264         break;
2265     default:
2266         break;
2267     }
2268
2269     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2270     avcodec_close(avctx);
2271     switch(avctx->codec_type) {
2272     case AVMEDIA_TYPE_AUDIO:
2273         is->audio_st = NULL;
2274         is->audio_stream = -1;
2275         break;
2276     case AVMEDIA_TYPE_VIDEO:
2277         is->video_st = NULL;
2278         is->video_stream = -1;
2279         break;
2280     case AVMEDIA_TYPE_SUBTITLE:
2281         is->subtitle_st = NULL;
2282         is->subtitle_stream = -1;
2283         break;
2284     default:
2285         break;
2286     }
2287 }
2288
2289 /* since we have only one decoding thread, we can use a global
2290    variable instead of a thread local variable */
2291 static VideoState *global_video_state;
2292
2293 static int decode_interrupt_cb(void)
2294 {
2295     return (global_video_state && global_video_state->abort_request);
2296 }
2297
2298 /* this thread gets the stream from the disk or the network */
2299 static int read_thread(void *arg)
2300 {
2301     VideoState *is = arg;
2302     AVFormatContext *ic = NULL;
2303     int err, i, ret;
2304     int st_index[AVMEDIA_TYPE_NB];
2305     AVPacket pkt1, *pkt = &pkt1;
2306     int eof=0;
2307     int pkt_in_play_range = 0;
2308     AVDictionaryEntry *t;
2309     AVDictionary **opts;
2310     int orig_nb_streams;
2311
2312     memset(st_index, -1, sizeof(st_index));
2313     is->video_stream = -1;
2314     is->audio_stream = -1;
2315     is->subtitle_stream = -1;
2316
2317     global_video_state = is;
2318     avio_set_interrupt_cb(decode_interrupt_cb);
2319
2320     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2321     if (err < 0) {
2322         print_error(is->filename, err);
2323         ret = -1;
2324         goto fail;
2325     }
2326     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2327         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2328         ret = AVERROR_OPTION_NOT_FOUND;
2329         goto fail;
2330     }
2331     is->ic = ic;
2332
2333     if(genpts)
2334         ic->flags |= AVFMT_FLAG_GENPTS;
2335
2336     opts = setup_find_stream_info_opts(ic, codec_opts);
2337     orig_nb_streams = ic->nb_streams;
2338
2339     err = avformat_find_stream_info(ic, opts);
2340     if (err < 0) {
2341         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2342         ret = -1;
2343         goto fail;
2344     }
2345     for (i = 0; i < orig_nb_streams; i++)
2346         av_dict_free(&opts[i]);
2347     av_freep(&opts);
2348
2349     if(ic->pb)
2350         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2351
2352     if(seek_by_bytes<0)
2353         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2354
2355     /* if seeking was requested, execute it now */
2356     if (start_time != AV_NOPTS_VALUE) {
2357         int64_t timestamp;
2358
2359         timestamp = start_time;
2360         /* add the stream start time */
2361         if (ic->start_time != AV_NOPTS_VALUE)
2362             timestamp += ic->start_time;
2363         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2364         if (ret < 0) {
2365             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2366                     is->filename, (double)timestamp / AV_TIME_BASE);
2367         }
2368     }
2369
2370     for (i = 0; i < ic->nb_streams; i++)
2371         ic->streams[i]->discard = AVDISCARD_ALL;
2372     if (!video_disable)
2373         st_index[AVMEDIA_TYPE_VIDEO] =
2374             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2375                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2376     if (!audio_disable)
2377         st_index[AVMEDIA_TYPE_AUDIO] =
2378             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2379                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2380                                 st_index[AVMEDIA_TYPE_VIDEO],
2381                                 NULL, 0);
2382     if (!video_disable)
2383         st_index[AVMEDIA_TYPE_SUBTITLE] =
2384             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2385                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2386                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2387                                  st_index[AVMEDIA_TYPE_AUDIO] :
2388                                  st_index[AVMEDIA_TYPE_VIDEO]),
2389                                 NULL, 0);
2390     if (show_status) {
2391         av_dump_format(ic, 0, is->filename, 0);
2392     }
2393
2394     is->show_mode = show_mode;
2395
2396     /* open the streams */
2397     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2398         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2399     }
2400
2401     ret=-1;
2402     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2403         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2404     }
2405     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2406     if (is->show_mode == SHOW_MODE_NONE)
2407         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2408
2409     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2410         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2411     }
2412
2413     if (is->video_stream < 0 && is->audio_stream < 0) {
2414         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2415         ret = -1;
2416         goto fail;
2417     }
2418
2419     for(;;) {
2420         if (is->abort_request)
2421             break;
2422         if (is->paused != is->last_paused) {
2423             is->last_paused = is->paused;
2424             if (is->paused)
2425                 is->read_pause_return= av_read_pause(ic);
2426             else
2427                 av_read_play(ic);
2428         }
2429 #if CONFIG_RTSP_DEMUXER
2430         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2431             /* wait 10 ms to avoid trying to get another packet */
2432             /* XXX: horrible */
2433             SDL_Delay(10);
2434             continue;
2435         }
2436 #endif
2437         if (is->seek_req) {
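            /* seek_min/seek_max bound the result around the pre-seek
               position: a forward seek may not land before it, a backward
               seek may not land after it */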
2438             int64_t seek_target= is->seek_pos;
2439             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2440             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2441 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2442 //      of the seek_pos/seek_rel variables
2443
2444             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2445             if (ret < 0) {
2446                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2447             }else{
2448                 if (is->audio_stream >= 0) {
2449                     packet_queue_flush(&is->audioq);
2450                     packet_queue_put(&is->audioq, &flush_pkt);
2451                 }
2452                 if (is->subtitle_stream >= 0) {
2453                     packet_queue_flush(&is->subtitleq);
2454                     packet_queue_put(&is->subtitleq, &flush_pkt);
2455                 }
2456                 if (is->video_stream >= 0) {
2457                     packet_queue_flush(&is->videoq);
2458                     packet_queue_put(&is->videoq, &flush_pkt);
2459                 }
2460             }
2461             is->seek_req = 0;
2462             eof= 0;
2463         }
2464
2465         /* if the queues are full, no need to read more */
2466         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2467             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2468                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2469                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2470             /* wait 10 ms */
2471             SDL_Delay(10);
2472             continue;
2473         }
2474         if(eof) {
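            /* at EOF, queue an empty packet on the video queue so that
               decoders with frame delay flush out their remaining pictures */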
2475             if(is->video_stream >= 0){
2476                 av_init_packet(pkt);
2477                 pkt->data=NULL;
2478                 pkt->size=0;
2479                 pkt->stream_index= is->video_stream;
2480                 packet_queue_put(&is->videoq, pkt);
2481             }
2482             SDL_Delay(10);
2483             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2484                 if(loop!=1 && (!loop || --loop)){
2485                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2486                 }else if(autoexit){
2487                     ret=AVERROR_EOF;
2488                     goto fail;
2489                 }
2490             }
2491             eof=0;
2492             continue;
2493         }
2494         ret = av_read_frame(ic, pkt);
2495         if (ret < 0) {
2496             if (ret == AVERROR_EOF || url_feof(ic->pb))
2497                 eof=1;
2498             if (ic->pb && ic->pb->error)
2499                 break;
2500             SDL_Delay(100); /* wait for user event */
2501             continue;
2502         }
2503         /* check if packet is in play range specified by user, then queue, otherwise discard */
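        /* a packet is in range when its pts, taken relative to the stream
           start time and converted to seconds, falls within the window
           given by the -ss and -t options */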
2504         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2505                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2506                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2507                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2508                 <= ((double)duration/1000000);
2509         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2510             packet_queue_put(&is->audioq, pkt);
2511         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2512             packet_queue_put(&is->videoq, pkt);
2513         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2514             packet_queue_put(&is->subtitleq, pkt);
2515         } else {
2516             av_free_packet(pkt);
2517         }
2518     }
2519     /* wait until the end */
2520     while (!is->abort_request) {
2521         SDL_Delay(100);
2522     }
2523
2524     ret = 0;
2525  fail:
2526     /* disable interrupting */
2527     global_video_state = NULL;
2528
2529     /* close each stream */
2530     if (is->audio_stream >= 0)
2531         stream_component_close(is, is->audio_stream);
2532     if (is->video_stream >= 0)
2533         stream_component_close(is, is->video_stream);
2534     if (is->subtitle_stream >= 0)
2535         stream_component_close(is, is->subtitle_stream);
2536     if (is->ic) {
2537         av_close_input_file(is->ic);
2538         is->ic = NULL; /* safety */
2539     }
2540     avio_set_interrupt_cb(NULL);
2541
2542     if (ret != 0) {
2543         SDL_Event event;
2544
2545         event.type = FF_QUIT_EVENT;
2546         event.user.data1 = is;
2547         SDL_PushEvent(&event);
2548     }
2549     return 0;
2550 }
2551
2552 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2553 {
2554     VideoState *is;
2555
2556     is = av_mallocz(sizeof(VideoState));
2557     if (!is)
2558         return NULL;
2559     av_strlcpy(is->filename, filename, sizeof(is->filename));
2560     is->iformat = iformat;
2561     is->ytop = 0;
2562     is->xleft = 0;
2563
2564     /* start video display */
2565     is->pictq_mutex = SDL_CreateMutex();
2566     is->pictq_cond = SDL_CreateCond();
2567
2568     is->subpq_mutex = SDL_CreateMutex();
2569     is->subpq_cond = SDL_CreateCond();
2570
2571     is->av_sync_type = av_sync_type;
2572     is->read_tid = SDL_CreateThread(read_thread, is);
2573     if (!is->read_tid) {
2574         av_free(is);
2575         return NULL;
2576     }
2577     return is;
2578 }
2579
2580 static void stream_cycle_channel(VideoState *is, int codec_type)
2581 {
2582     AVFormatContext *ic = is->ic;
2583     int start_index, stream_index;
2584     AVStream *st;
2585
2586     if (codec_type == AVMEDIA_TYPE_VIDEO)
2587         start_index = is->video_stream;
2588     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2589         start_index = is->audio_stream;
2590     else
2591         start_index = is->subtitle_stream;
2592     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2593         return;
2594     stream_index = start_index;
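    /* walk forward through the streams, wrapping at the end, until another
       usable stream of the requested type is found; for subtitles, wrapping
       past the end selects "no subtitle stream" */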
2595     for(;;) {
2596         if (++stream_index >= is->ic->nb_streams)
2597         {
2598             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2599             {
2600                 stream_index = -1;
2601                 goto the_end;
2602             } else
2603                 stream_index = 0;
2604         }
2605         if (stream_index == start_index)
2606             return;
2607         st = ic->streams[stream_index];
2608         if (st->codec->codec_type == codec_type) {
2609             /* check that parameters are OK */
2610             switch(codec_type) {
2611             case AVMEDIA_TYPE_AUDIO:
2612                 if (st->codec->sample_rate != 0 &&
2613                     st->codec->channels != 0)
2614                     goto the_end;
2615                 break;
2616             case AVMEDIA_TYPE_VIDEO:
2617             case AVMEDIA_TYPE_SUBTITLE:
2618                 goto the_end;
2619             default:
2620                 break;
2621             }
2622         }
2623     }
2624  the_end:
2625     stream_component_close(is, start_index);
2626     stream_component_open(is, stream_index);
2627 }
2628
2629
2630 static void toggle_full_screen(void)
2631 {
2632     is_full_screen = !is_full_screen;
2633     video_open(cur_stream);
2634 }
2635
2636 static void toggle_pause(void)
2637 {
2638     if (cur_stream)
2639         stream_toggle_pause(cur_stream);
2640     step = 0;
2641 }
2642
2643 static void step_to_next_frame(void)
2644 {
2645     if (cur_stream) {
2646         /* if the stream is paused, unpause it, then step */
2647         if (cur_stream->paused)
2648             stream_toggle_pause(cur_stream);
2649     }
2650     step = 1;
2651 }
2652
2653 static void toggle_audio_display(void)
2654 {
2655     if (cur_stream) {
2656         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2657         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2658         fill_rectangle(screen,
2659                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2660                     bgcolor);
2661         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2662     }
2663 }
2664
2665 /* handle an event sent by the GUI */
2666 static void event_loop(void)
2667 {
2668     SDL_Event event;
2669     double incr, pos, frac;
2670
2671     for(;;) {
2672         double x;
2673         SDL_WaitEvent(&event);
2674         switch(event.type) {
2675         case SDL_KEYDOWN:
2676             if (exit_on_keydown) {
2677                 do_exit();
2678                 break;
2679             }
2680             switch(event.key.keysym.sym) {
2681             case SDLK_ESCAPE:
2682             case SDLK_q:
2683                 do_exit();
2684                 break;
2685             case SDLK_f:
2686                 toggle_full_screen();
2687                 break;
2688             case SDLK_p:
2689             case SDLK_SPACE:
2690                 toggle_pause();
2691                 break;
2692             case SDLK_s: //S: Step to next frame
2693                 step_to_next_frame();
2694                 break;
2695             case SDLK_a:
2696                 if (cur_stream)
2697                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2698                 break;
2699             case SDLK_v:
2700                 if (cur_stream)
2701                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2702                 break;
2703             case SDLK_t:
2704                 if (cur_stream)
2705                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2706                 break;
2707             case SDLK_w:
2708                 toggle_audio_display();
2709                 break;
2710             case SDLK_LEFT:
2711                 incr = -10.0;
2712                 goto do_seek;
2713             case SDLK_RIGHT:
2714                 incr = 10.0;
2715                 goto do_seek;
2716             case SDLK_UP:
2717                 incr = 60.0;
2718                 goto do_seek;
2719             case SDLK_DOWN:
2720                 incr = -60.0;
2721             do_seek:
2722                 if (cur_stream) {
2723                     if (seek_by_bytes) {
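                        /* in byte-seek mode, convert the time increment
                           into a byte offset using the stream bit rate, or
                           a fixed 180000 bytes/s fallback if unknown */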
2724                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2725                             pos= cur_stream->video_current_pos;
2726                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2727                             pos= cur_stream->audio_pkt.pos;
2728                         }else
2729                             pos = avio_tell(cur_stream->ic->pb);
2730                         if (cur_stream->ic->bit_rate)
2731                             incr *= cur_stream->ic->bit_rate / 8.0;
2732                         else
2733                             incr *= 180000.0;
2734                         pos += incr;
2735                         stream_seek(cur_stream, pos, incr, 1);
2736                     } else {
2737                         pos = get_master_clock(cur_stream);
2738                         pos += incr;
2739                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2740                     }
2741                 }
2742                 break;
2743             default:
2744                 break;
2745             }
2746             break;
2747         case SDL_MOUSEBUTTONDOWN:
2748             if (exit_on_mousedown) {
2749                 do_exit();
2750                 break;
2751             }
2752         case SDL_MOUSEMOTION:
2753             if(event.type ==SDL_MOUSEBUTTONDOWN){
2754                 x= event.button.x;
2755             }else{
2756                 if(event.motion.state != SDL_PRESSED)
2757                     break;
2758                 x= event.motion.x;
2759             }
2760             if (cur_stream) {
2761                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2762                     uint64_t size=  avio_size(cur_stream->ic->pb);
2763                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2764                 }else{
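                    /* map the horizontal click position to a fraction of
                       the total duration and seek to that timestamp,
                       offset by the container start time when known */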
2765                     int64_t ts;
2766                     int ns, hh, mm, ss;
2767                     int tns, thh, tmm, tss;
2768                     tns = cur_stream->ic->duration/1000000LL;
2769                     thh = tns/3600;
2770                     tmm = (tns%3600)/60;
2771                     tss = (tns%60);
2772                     frac = x/cur_stream->width;
2773                     ns = frac*tns;
2774                     hh = ns/3600;
2775                     mm = (ns%3600)/60;
2776                     ss = (ns%60);
2777                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2778                             hh, mm, ss, thh, tmm, tss);
2779                     ts = frac*cur_stream->ic->duration;
2780                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2781                         ts += cur_stream->ic->start_time;
2782                     stream_seek(cur_stream, ts, 0, 0);
2783                 }
2784             }
2785             break;
2786         case SDL_VIDEORESIZE:
2787             if (cur_stream) {
2788                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2789                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2790                 screen_width = cur_stream->width = event.resize.w;
2791                 screen_height= cur_stream->height= event.resize.h;
2792             }
2793             break;
2794         case SDL_QUIT:
2795         case FF_QUIT_EVENT:
2796             do_exit();
2797             break;
2798         case FF_ALLOC_EVENT:
2799             video_open(event.user.data1);
2800             alloc_picture(event.user.data1);
2801             break;
2802         case FF_REFRESH_EVENT:
2803             video_refresh(event.user.data1);
2804             cur_stream->refresh=0;
2805             break;
2806         default:
2807             break;
2808         }
2809     }
2810 }
2811
2812 static int opt_frame_size(const char *opt, const char *arg)
2813 {
2814     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2815     return opt_default("video_size", arg);
2816 }
2817
2818 static int opt_width(const char *opt, const char *arg)
2819 {
2820     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2821     return 0;
2822 }
2823
2824 static int opt_height(const char *opt, const char *arg)
2825 {
2826     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2827     return 0;
2828 }
2829
2830 static int opt_format(const char *opt, const char *arg)
2831 {
2832     file_iformat = av_find_input_format(arg);
2833     if (!file_iformat) {
2834         fprintf(stderr, "Unknown input format: %s\n", arg);
2835         return AVERROR(EINVAL);
2836     }
2837     return 0;
2838 }
2839
2840 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2841 {
2842     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2843     return opt_default("pixel_format", arg);
2844 }
2845
2846 static int opt_sync(const char *opt, const char *arg)
2847 {
2848     if (!strcmp(arg, "audio"))
2849         av_sync_type = AV_SYNC_AUDIO_MASTER;
2850     else if (!strcmp(arg, "video"))
2851         av_sync_type = AV_SYNC_VIDEO_MASTER;
2852     else if (!strcmp(arg, "ext"))
2853         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2854     else {
2855         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2856         exit(1);
2857     }
2858     return 0;
2859 }
2860
2861 static int opt_seek(const char *opt, const char *arg)
2862 {
2863     start_time = parse_time_or_die(opt, arg, 1);
2864     return 0;
2865 }
2866
2867 static int opt_duration(const char *opt, const char *arg)
2868 {
2869     duration = parse_time_or_die(opt, arg, 1);
2870     return 0;
2871 }
2872
2873 static int opt_thread_count(const char *opt, const char *arg)
2874 {
2875     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2876 #if !HAVE_THREADS
2877     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2878 #endif
2879     return 0;
2880 }
2881
2882 static int opt_show_mode(const char *opt, const char *arg)
2883 {
2884     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2885                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2886                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2887                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2888     return 0;
2889 }
2890
2891 static int opt_input_file(const char *opt, const char *filename)
2892 {
2893     if (input_filename) {
2894         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2895                 filename, input_filename);
2896         exit(1);
2897     }
2898     if (!strcmp(filename, "-"))
2899         filename = "pipe:";
2900     input_filename = filename;
2901     return 0;
2902 }
2903
2904 static const OptionDef options[] = {
2905 #include "cmdutils_common_opts.h"
2906     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2907     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2908     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2909     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2910     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2911     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2912     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2913     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2914     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2915     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2916     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2917     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2918     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2919     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2920     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2921     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2922     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2923     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2924     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2925     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2926     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2927     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2928     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2929     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2930     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2931     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2932     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2933     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2934     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2935     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2936     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2937     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2938     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2939     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2940     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2941 #if CONFIG_AVFILTER
2942     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2943 #endif
2944     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2945     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2946     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2947     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2948     { NULL, },
2949 };
2950
2951 static void show_usage(void)
2952 {
2953     printf("Simple media player\n");
2954     printf("usage: ffplay [options] input_file\n");
2955     printf("\n");
2956 }
2957
2958 static int opt_help(const char *opt, const char *arg)
2959 {
2960     av_log_set_callback(log_callback_help);
2961     show_usage();
2962     show_help_options(options, "Main options:\n",
2963                       OPT_EXPERT, 0);
2964     show_help_options(options, "\nAdvanced options:\n",
2965                       OPT_EXPERT, OPT_EXPERT);
2966     printf("\n");
2967     av_opt_show2(avcodec_opts[0], NULL,
2968                  AV_OPT_FLAG_DECODING_PARAM, 0);
2969     printf("\n");
2970     av_opt_show2(avformat_opts, NULL,
2971                  AV_OPT_FLAG_DECODING_PARAM, 0);
2972 #if !CONFIG_AVFILTER
2973     printf("\n");
2974     av_opt_show2(sws_opts, NULL,
2975                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2976 #endif
2977     printf("\nWhile playing:\n"
2978            "q, ESC              quit\n"
2979            "f                   toggle full screen\n"
2980            "p, SPC              pause\n"
2981            "a                   cycle audio channel\n"
2982            "v                   cycle video channel\n"
2983            "t                   cycle subtitle channel\n"
2984            "w                   show audio waves\n"
2985            "s                   activate frame-step mode\n"
2986            "left/right          seek backward/forward 10 seconds\n"
2987            "down/up             seek backward/forward 1 minute\n"
2988            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2989            );
2990     return 0;
2991 }
2992
2993 /* program entry point */
2994 int main(int argc, char **argv)
2995 {
2996     int flags;
2997
2998     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2999
3000     /* register all codecs, demuxers and protocols */
3001     avcodec_register_all();
3002 #if CONFIG_AVDEVICE
3003     avdevice_register_all();
3004 #endif
3005 #if CONFIG_AVFILTER
3006     avfilter_register_all();
3007 #endif
3008     av_register_all();
3009
3010     init_opts();
3011
3012     show_banner();
3013
3014     parse_options(argc, argv, options, opt_input_file);
3015
3016     if (!input_filename) {
3017         show_usage();
3018         fprintf(stderr, "An input file must be specified\n");
3019         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3020         exit(1);
3021     }
3022
3023     if (display_disable) {
3024         video_disable = 1;
3025     }
3026     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3027     if (audio_disable)
3028         flags &= ~SDL_INIT_AUDIO;
3029 #if !defined(__MINGW32__) && !defined(__APPLE__)
3030     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3031 #endif
3032     if (SDL_Init (flags)) {
3033         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3034         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3035         exit(1);
3036     }
3037
3038     if (!display_disable) {
3039 #if HAVE_SDL_VIDEO_SIZE
3040         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3041         fs_screen_width = vi->current_w;
3042         fs_screen_height = vi->current_h;
3043 #endif
3044     }
3045
3046     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3047     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3048     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3049
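    /* flush_pkt is a sentinel packet: when a stream thread pops it from its
       queue it flushes the codec buffers; it is queued after every seek */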
3050     av_init_packet(&flush_pkt);
3051     flush_pkt.data= (uint8_t *)"FLUSH";
3052
3053     cur_stream = stream_open(input_filename, file_iformat);
3054
3055     event_loop();
3056
3057     /* never returns */
3058
3059     return 0;
3060 }