[android-x86/external-ffmpeg.git] / avplay.c
1 /*
2  * avplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/time.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavresample/avresample.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avfilter.h"
44 # include "libavfilter/buffersink.h"
45 # include "libavfilter/buffersrc.h"
46 #endif
47
48 #include "cmdutils.h"
49
50 #include <SDL.h>
51 #include <SDL_thread.h>
52
53 #ifdef __MINGW32__
54 #undef main /* We don't want SDL to override our main() */
55 #endif
56
57 #include <assert.h>
58
59 const char program_name[] = "avplay";
60 const int program_birth_year = 2003;
61
62 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
63 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
64 #define MIN_FRAMES 5
65
66 /* SDL audio buffer size, in samples. Should be small to have precise
67    A/V sync as SDL does not have hardware buffer fullness info. */
68 #define SDL_AUDIO_BUFFER_SIZE 1024
69
70 /* no AV sync correction is done if below the AV sync threshold */
71 #define AV_SYNC_THRESHOLD 0.01
72 /* no AV correction is done if the error is too big */
73 #define AV_NOSYNC_THRESHOLD 10.0
74
75 #define FRAME_SKIP_FACTOR 0.05
76
77 /* maximum audio speed change to get correct sync */
78 #define SAMPLE_CORRECTION_PERCENT_MAX 10
79
80 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
81 #define AUDIO_DIFF_AVG_NB   20
82
83 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
84 #define SAMPLE_ARRAY_SIZE (2 * 65536)
85
86 static int64_t sws_flags = SWS_BICUBIC;
87
88 typedef struct PacketQueue {
89     AVPacketList *first_pkt, *last_pkt;
90     int nb_packets;
91     int size;
92     int abort_request;
93     SDL_mutex *mutex;
94     SDL_cond *cond;
95 } PacketQueue;
96
97 #define VIDEO_PICTURE_QUEUE_SIZE 2
98 #define SUBPICTURE_QUEUE_SIZE 4
99
100 typedef struct VideoPicture {
101     double pts;             // presentation timestamp for this picture
102     double target_clock;    // av_gettime() time at which this should be displayed ideally
103     int64_t pos;            // byte position in file
104     SDL_Overlay *bmp;
105     int width, height; /* source height & width */
106     int allocated;
107     int reallocate;
108     enum AVPixelFormat pix_fmt;
109
110     AVRational sar;
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
155     uint8_t *audio_buf;
156     uint8_t *audio_buf1;
157     unsigned int audio_buf_size; /* in bytes */
158     int audio_buf_index; /* in bytes */
159     AVPacket audio_pkt_temp;
160     AVPacket audio_pkt;
161     enum AVSampleFormat sdl_sample_fmt;
162     uint64_t sdl_channel_layout;
163     int sdl_channels;
164     int sdl_sample_rate;
165     enum AVSampleFormat resample_sample_fmt;
166     uint64_t resample_channel_layout;
167     int resample_sample_rate;
168     AVAudioResampleContext *avr;
169     AVFrame *frame;
170
171     int show_audio; /* if true, display audio samples */
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;             // pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;       // current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift; // video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;      // current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     //    QETimer *video_timer;
209     char filename[1024];
210     int width, height, xleft, ytop;
211
212     PtsCorrectionContext pts_ctx;
213
214 #if CONFIG_AVFILTER
215     AVFilterContext *in_video_filter;   // the first filter in the video chain
216     AVFilterContext *out_video_filter;  // the last filter in the video chain
217 #endif
218
219     float skip_frames;
220     float skip_frames_index;
221     int refresh;
222 } VideoState;
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width  = 0;
231 static int screen_height = 0;
232 static int audio_disable;
233 static int video_disable;
234 static int wanted_stream[AVMEDIA_TYPE_NB] = {
235     [AVMEDIA_TYPE_AUDIO]    = -1,
236     [AVMEDIA_TYPE_VIDEO]    = -1,
237     [AVMEDIA_TYPE_SUBTITLE] = -1,
238 };
239 static int seek_by_bytes = -1;
240 static int display_disable;
241 static int show_status = 1;
242 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
243 static int64_t start_time = AV_NOPTS_VALUE;
244 static int64_t duration = AV_NOPTS_VALUE;
245 static int debug_mv = 0;
246 static int step = 0;
247 static int workaround_bugs = 1;
248 static int fast = 0;
249 static int genpts = 0;
250 static int idct = FF_IDCT_AUTO;
251 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
253 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts = -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop = 1;
260 static int framedrop = 1;
261 static int infinite_buffer = 0;
262
263 static int rdftspeed = 20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
273 static AVPacket flush_pkt;
274
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
282
283 /* packet queue handling */
284 static void packet_queue_init(PacketQueue *q)
285 {
286     memset(q, 0, sizeof(PacketQueue));
287     q->mutex = SDL_CreateMutex();
288     q->cond = SDL_CreateCond();
289     packet_queue_put(q, &flush_pkt);
290 }
291
292 static void packet_queue_flush(PacketQueue *q)
293 {
294     AVPacketList *pkt, *pkt1;
295
296     SDL_LockMutex(q->mutex);
297     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
298         pkt1 = pkt->next;
299         av_free_packet(&pkt->pkt);
300         av_freep(&pkt);
301     }
302     q->last_pkt = NULL;
303     q->first_pkt = NULL;
304     q->nb_packets = 0;
305     q->size = 0;
306     SDL_UnlockMutex(q->mutex);
307 }
308
309 static void packet_queue_end(PacketQueue *q)
310 {
311     packet_queue_flush(q);
312     SDL_DestroyMutex(q->mutex);
313     SDL_DestroyCond(q->cond);
314 }
315
316 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
317 {
318     AVPacketList *pkt1;
319
320     /* duplicate the packet */
321     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
322         return -1;
323
324     pkt1 = av_malloc(sizeof(AVPacketList));
325     if (!pkt1)
326         return -1;
327     pkt1->pkt = *pkt;
328     pkt1->next = NULL;
329
330
331     SDL_LockMutex(q->mutex);
332
333     if (!q->last_pkt)
334
335         q->first_pkt = pkt1;
336     else
337         q->last_pkt->next = pkt1;
338     q->last_pkt = pkt1;
339     q->nb_packets++;
340     q->size += pkt1->pkt.size + sizeof(*pkt1);
341     /* XXX: should duplicate packet data in DV case */
342     SDL_CondSignal(q->cond);
343
344     SDL_UnlockMutex(q->mutex);
345     return 0;
346 }
347
348 static void packet_queue_abort(PacketQueue *q)
349 {
350     SDL_LockMutex(q->mutex);
351
352     q->abort_request = 1;
353
354     SDL_CondSignal(q->cond);
355
356     SDL_UnlockMutex(q->mutex);
357 }
358
359 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
360 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
361 {
362     AVPacketList *pkt1;
363     int ret;
364
365     SDL_LockMutex(q->mutex);
366
367     for (;;) {
368         if (q->abort_request) {
369             ret = -1;
370             break;
371         }
372
373         pkt1 = q->first_pkt;
374         if (pkt1) {
375             q->first_pkt = pkt1->next;
376             if (!q->first_pkt)
377                 q->last_pkt = NULL;
378             q->nb_packets--;
379             q->size -= pkt1->pkt.size + sizeof(*pkt1);
380             *pkt = pkt1->pkt;
381             av_free(pkt1);
382             ret = 1;
383             break;
384         } else if (!block) {
385             ret = 0;
386             break;
387         } else {
388             SDL_CondWait(q->cond, q->mutex);
389         }
390     }
391     SDL_UnlockMutex(q->mutex);
392     return ret;
393 }
394
395 static inline void fill_rectangle(SDL_Surface *screen,
396                                   int x, int y, int w, int h, int color)
397 {
398     SDL_Rect rect;
399     rect.x = x;
400     rect.y = y;
401     rect.w = w;
402     rect.h = h;
403     SDL_FillRect(screen, &rect, color);
404 }
405
406 #define ALPHA_BLEND(a, oldp, newp, s)\
407 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408
409 #define RGBA_IN(r, g, b, a, s)\
410 {\
411     unsigned int v = ((const uint32_t *)(s))[0];\
412     a = (v >> 24) & 0xff;\
413     r = (v >> 16) & 0xff;\
414     g = (v >> 8) & 0xff;\
415     b = v & 0xff;\
416 }
417
418 #define YUVA_IN(y, u, v, a, s, pal)\
419 {\
420     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421     a = (val >> 24) & 0xff;\
422     y = (val >> 16) & 0xff;\
423     u = (val >> 8) & 0xff;\
424     v = val & 0xff;\
425 }
426
427 #define YUVA_OUT(d, y, u, v, a)\
428 {\
429     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430 }
431
432
433 #define BPP 1
434
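/* Blend one palettized subtitle rectangle onto a YUV420P destination picture.
 * Each palette entry has already been converted to YUVA (see the "Now in
 * YCrCb!" note below), so luma is blended per pixel while the 2x2-subsampled
 * chroma planes are blended from sums of 2 or 4 neighbouring pixels; the
 * extra shift passed to ALPHA_BLEND compensates for that accumulation. */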
435 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436 {
437     int wrap, wrap3, width2, skip2;
438     int y, u, v, a, u1, v1, a1, w, h;
439     uint8_t *lum, *cb, *cr;
440     const uint8_t *p;
441     const uint32_t *pal;
442     int dstx, dsty, dstw, dsth;
443
444     dstw = av_clip(rect->w, 0, imgw);
445     dsth = av_clip(rect->h, 0, imgh);
446     dstx = av_clip(rect->x, 0, imgw - dstw);
447     dsty = av_clip(rect->y, 0, imgh - dsth);
448     lum = dst->data[0] + dsty * dst->linesize[0];
449     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451
452     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
453     skip2 = dstx >> 1;
454     wrap = dst->linesize[0];
455     wrap3 = rect->pict.linesize[0];
456     p = rect->pict.data[0];
457     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
458
459     if (dsty & 1) {
460         lum += dstx;
461         cb += skip2;
462         cr += skip2;
463
464         if (dstx & 1) {
465             YUVA_IN(y, u, v, a, p, pal);
466             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469             cb++;
470             cr++;
471             lum++;
472             p += BPP;
473         }
474         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
475             YUVA_IN(y, u, v, a, p, pal);
476             u1 = u;
477             v1 = v;
478             a1 = a;
479             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480
481             YUVA_IN(y, u, v, a, p + BPP, pal);
482             u1 += u;
483             v1 += v;
484             a1 += a;
485             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488             cb++;
489             cr++;
490             p += 2 * BPP;
491             lum += 2;
492         }
493         if (w) {
494             YUVA_IN(y, u, v, a, p, pal);
495             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498             p++;
499             lum++;
500         }
501         p += wrap3 - dstw * BPP;
502         lum += wrap - dstw - dstx;
503         cb += dst->linesize[1] - width2 - skip2;
504         cr += dst->linesize[2] - width2 - skip2;
505     }
506     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
507         lum += dstx;
508         cb += skip2;
509         cr += skip2;
510
511         if (dstx & 1) {
512             YUVA_IN(y, u, v, a, p, pal);
513             u1 = u;
514             v1 = v;
515             a1 = a;
516             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517             p += wrap3;
518             lum += wrap;
519             YUVA_IN(y, u, v, a, p, pal);
520             u1 += u;
521             v1 += v;
522             a1 += a;
523             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
525             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
526             cb++;
527             cr++;
528             p += -wrap3 + BPP;
529             lum += -wrap + 1;
530         }
531         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
532             YUVA_IN(y, u, v, a, p, pal);
533             u1 = u;
534             v1 = v;
535             a1 = a;
536             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
538             YUVA_IN(y, u, v, a, p + BPP, pal);
539             u1 += u;
540             v1 += v;
541             a1 += a;
542             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
543             p += wrap3;
544             lum += wrap;
545
546             YUVA_IN(y, u, v, a, p, pal);
547             u1 += u;
548             v1 += v;
549             a1 += a;
550             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
551
552             YUVA_IN(y, u, v, a, p + BPP, pal);
553             u1 += u;
554             v1 += v;
555             a1 += a;
556             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
557
558             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
559             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
560
561             cb++;
562             cr++;
563             p += -wrap3 + 2 * BPP;
564             lum += -wrap + 2;
565         }
566         if (w) {
567             YUVA_IN(y, u, v, a, p, pal);
568             u1 = u;
569             v1 = v;
570             a1 = a;
571             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572             p += wrap3;
573             lum += wrap;
574             YUVA_IN(y, u, v, a, p, pal);
575             u1 += u;
576             v1 += v;
577             a1 += a;
578             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
579             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
580             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
581             cb++;
582             cr++;
583             p += -wrap3 + BPP;
584             lum += -wrap + 1;
585         }
586         p += wrap3 + (wrap3 - dstw * BPP);
587         lum += wrap + (wrap - dstw - dstx);
588         cb += dst->linesize[1] - width2 - skip2;
589         cr += dst->linesize[2] - width2 - skip2;
590     }
591     /* handle odd height */
592     if (h) {
593         lum += dstx;
594         cb += skip2;
595         cr += skip2;
596
597         if (dstx & 1) {
598             YUVA_IN(y, u, v, a, p, pal);
599             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
600             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
601             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
602             cb++;
603             cr++;
604             lum++;
605             p += BPP;
606         }
607         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
608             YUVA_IN(y, u, v, a, p, pal);
609             u1 = u;
610             v1 = v;
611             a1 = a;
612             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
614             YUVA_IN(y, u, v, a, p + BPP, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += 2 * BPP;
624             lum += 2;
625         }
626         if (w) {
627             YUVA_IN(y, u, v, a, p, pal);
628             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631         }
632     }
633 }
634
635 static void free_subpicture(SubPicture *sp)
636 {
637     avsubtitle_free(&sp->sub);
638 }
639
640 static void video_image_display(VideoState *is)
641 {
642     VideoPicture *vp;
643     SubPicture *sp;
644     AVPicture pict;
645     float aspect_ratio;
646     int width, height, x, y;
647     SDL_Rect rect;
648     int i;
649
650     vp = &is->pictq[is->pictq_rindex];
651     if (vp->bmp) {
652 #if CONFIG_AVFILTER
653          if (!vp->sar.num)
654              aspect_ratio = 0;
655          else
656              aspect_ratio = av_q2d(vp->sar);
657 #else
658
659         /* XXX: use the sample aspect ratio stored in the frame */
660         if (is->video_st->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
662         else if (is->video_st->codec->sample_aspect_ratio.num)
663             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
664         else
665             aspect_ratio = 0;
666 #endif
667         if (aspect_ratio <= 0.0)
668             aspect_ratio = 1.0;
669         aspect_ratio *= (float)vp->width / (float)vp->height;
670
671         if (is->subtitle_st)
672         {
673             if (is->subpq_size > 0)
674             {
675                 sp = &is->subpq[is->subpq_rindex];
676
677                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
678                 {
679                     SDL_LockYUVOverlay (vp->bmp);
680
681                     pict.data[0] = vp->bmp->pixels[0];
682                     pict.data[1] = vp->bmp->pixels[2];
683                     pict.data[2] = vp->bmp->pixels[1];
684
685                     pict.linesize[0] = vp->bmp->pitches[0];
686                     pict.linesize[1] = vp->bmp->pitches[2];
687                     pict.linesize[2] = vp->bmp->pitches[1];
688
689                     for (i = 0; i < sp->sub.num_rects; i++)
690                         blend_subrect(&pict, sp->sub.rects[i],
691                                       vp->bmp->w, vp->bmp->h);
692
693                     SDL_UnlockYUVOverlay (vp->bmp);
694                 }
695             }
696         }
697
698
699         /* XXX: we suppose the screen has a 1.0 pixel ratio */
700         height = is->height;
701         width = ((int)rint(height * aspect_ratio)) & ~1;
702         if (width > is->width) {
703             width = is->width;
704             height = ((int)rint(width / aspect_ratio)) & ~1;
705         }
706         x = (is->width - width) / 2;
707         y = (is->height - height) / 2;
708         is->no_background = 0;
709         rect.x = is->xleft + x;
710         rect.y = is->ytop  + y;
711         rect.w = width;
712         rect.h = height;
713         SDL_DisplayYUVOverlay(vp->bmp, &rect);
714     }
715 }
716
717 /* get the current audio output buffer size, in samples. With SDL, we
718    cannot get precise information */
719 static int audio_write_get_buf_size(VideoState *is)
720 {
721     return is->audio_buf_size - is->audio_buf_index;
722 }
723
724 static inline int compute_mod(int a, int b)
725 {
726     a = a % b;
727     if (a >= 0)
728         return a;
729     else
730         return a + b;
731 }
732
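/* Draw the audio-only visualization: with show_audio == 1 a per-channel
 * waveform (oscilloscope) is drawn over the whole window, otherwise one
 * spectrogram column per refresh is computed with an RDFT and drawn at the
 * current x position. The display index is centered on the samples currently
 * being played, estimated from the output buffer fill level. */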
733 static void video_audio_display(VideoState *s)
734 {
735     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
736     int ch, channels, h, h2, bgcolor, fgcolor;
737     int16_t time_diff;
738     int rdft_bits, nb_freq;
739
740     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
741         ;
742     nb_freq = 1 << (rdft_bits - 1);
743
744     /* compute display index : center on currently output samples */
745     channels = s->sdl_channels;
746     nb_display_channels = channels;
747     if (!s->paused) {
748         int data_used = s->show_audio == 1 ? s->width : (2 * nb_freq);
749         n = 2 * channels;
750         delay = audio_write_get_buf_size(s);
751         delay /= n;
752
753         /* to be more precise, we take into account the time spent since
754            the last buffer computation */
755         if (audio_callback_time) {
756             time_diff = av_gettime() - audio_callback_time;
757             delay -= (time_diff * s->sdl_sample_rate) / 1000000;
758         }
759
760         delay += 2 * data_used;
761         if (delay < data_used)
762             delay = data_used;
763
764         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
765         if (s->show_audio == 1) {
766             h = INT_MIN;
767             for (i = 0; i < 1000; i += channels) {
768                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
769                 int a = s->sample_array[idx];
770                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
771                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
772                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
773                 int score = a - d;
774                 if (h < score && (b ^ c) < 0) {
775                     h = score;
776                     i_start = idx;
777                 }
778             }
779         }
780
781         s->last_i_start = i_start;
782     } else {
783         i_start = s->last_i_start;
784     }
785
786     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
787     if (s->show_audio == 1) {
788         fill_rectangle(screen,
789                        s->xleft, s->ytop, s->width, s->height,
790                        bgcolor);
791
792         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
793
794         /* total height for one channel */
795         h = s->height / nb_display_channels;
796         /* graph height / 2 */
797         h2 = (h * 9) / 20;
798         for (ch = 0; ch < nb_display_channels; ch++) {
799             i = i_start + ch;
800             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
801             for (x = 0; x < s->width; x++) {
802                 y = (s->sample_array[i] * h2) >> 15;
803                 if (y < 0) {
804                     y = -y;
805                     ys = y1 - y;
806                 } else {
807                     ys = y1;
808                 }
809                 fill_rectangle(screen,
810                                s->xleft + x, ys, 1, y,
811                                fgcolor);
812                 i += channels;
813                 if (i >= SAMPLE_ARRAY_SIZE)
814                     i -= SAMPLE_ARRAY_SIZE;
815             }
816         }
817
818         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
819
820         for (ch = 1; ch < nb_display_channels; ch++) {
821             y = s->ytop + ch * h;
822             fill_rectangle(screen,
823                            s->xleft, y, s->width, 1,
824                            fgcolor);
825         }
826         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
827     } else {
828         nb_display_channels= FFMIN(nb_display_channels, 2);
829         if (rdft_bits != s->rdft_bits) {
830             av_rdft_end(s->rdft);
831             av_free(s->rdft_data);
832             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
833             s->rdft_bits = rdft_bits;
834             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
835         }
836         {
837             FFTSample *data[2];
838             for (ch = 0; ch < nb_display_channels; ch++) {
839                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
840                 i = i_start + ch;
841                 for (x = 0; x < 2 * nb_freq; x++) {
842                     double w = (x-nb_freq) * (1.0 / nb_freq);
843                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
844                     i += channels;
845                     if (i >= SAMPLE_ARRAY_SIZE)
846                         i -= SAMPLE_ARRAY_SIZE;
847                 }
848                 av_rdft_calc(s->rdft, data[ch]);
849             }
850             /* Least efficient way to do this; we should of course
851              * access it directly, but it is more than fast enough. */
852             for (y = 0; y < s->height; y++) {
853                 double w = 1 / sqrt(nb_freq);
854                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
855                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
856                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
857                 a = FFMIN(a, 255);
858                 b = FFMIN(b, 255);
859                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
860
861                 fill_rectangle(screen,
862                             s->xpos, s->height-y, 1, 1,
863                             fgcolor);
864             }
865         }
866         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
867         s->xpos++;
868         if (s->xpos >= s->width)
869             s->xpos= s->xleft;
870     }
871 }
872
873 static int video_open(VideoState *is)
874 {
875     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
876     int w,h;
877
878     if (is_full_screen) flags |= SDL_FULLSCREEN;
879     else                flags |= SDL_RESIZABLE;
880
881     if (is_full_screen && fs_screen_width) {
882         w = fs_screen_width;
883         h = fs_screen_height;
884     } else if (!is_full_screen && screen_width) {
885         w = screen_width;
886         h = screen_height;
887 #if CONFIG_AVFILTER
888     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
889         w = is->out_video_filter->inputs[0]->w;
890         h = is->out_video_filter->inputs[0]->h;
891 #else
892     } else if (is->video_st && is->video_st->codec->width) {
893         w = is->video_st->codec->width;
894         h = is->video_st->codec->height;
895 #endif
896     } else {
897         w = 640;
898         h = 480;
899     }
900     if (screen && is->width == screen->w && screen->w == w
901        && is->height== screen->h && screen->h == h)
902         return 0;
903
904 #if defined(__APPLE__) && !SDL_VERSION_ATLEAST(1, 2, 14)
905     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X and older SDL */
906     screen = SDL_SetVideoMode(w, h, 24, flags);
907 #else
908     screen = SDL_SetVideoMode(w, h, 0, flags);
909 #endif
910     if (!screen) {
911         fprintf(stderr, "SDL: could not set video mode - exiting\n");
912         return -1;
913     }
914     if (!window_title)
915         window_title = input_filename;
916     SDL_WM_SetCaption(window_title, window_title);
917
918     is->width  = screen->w;
919     is->height = screen->h;
920
921     return 0;
922 }
923
924 /* display the current picture, if any */
925 static void video_display(VideoState *is)
926 {
927     if (!screen)
928         video_open(cur_stream);
929     if (is->audio_st && is->show_audio)
930         video_audio_display(is);
931     else if (is->video_st)
932         video_image_display(is);
933 }
934
935 static int refresh_thread(void *opaque)
936 {
937     VideoState *is= opaque;
938     while (!is->abort_request) {
939         SDL_Event event;
940         event.type = FF_REFRESH_EVENT;
941         event.user.data1 = opaque;
942         if (!is->refresh) {
943             is->refresh = 1;
944             SDL_PushEvent(&event);
945         }
946         av_usleep(is->audio_st && is->show_audio ? rdftspeed * 1000 : 5000); // FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
947     }
948     return 0;
949 }
950
951 /* get the current audio clock value */
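/* is->audio_clock is the PTS at the end of the last decoded audio buffer;
 * subtracting the time represented by the part of that buffer that has not
 * yet been written out gives an estimate of the PTS currently audible. */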
952 static double get_audio_clock(VideoState *is)
953 {
954     double pts;
955     int hw_buf_size, bytes_per_sec;
956     pts = is->audio_clock;
957     hw_buf_size = audio_write_get_buf_size(is);
958     bytes_per_sec = 0;
959     if (is->audio_st) {
960         bytes_per_sec = is->sdl_sample_rate * is->sdl_channels *
961                         av_get_bytes_per_sample(is->sdl_sample_fmt);
962     }
963     if (bytes_per_sec)
964         pts -= (double)hw_buf_size / bytes_per_sec;
965     return pts;
966 }
967
968 /* get the current video clock value */
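/* video_current_pts_drift stores (displayed pts - wall clock) at the time of
 * the last display, so adding the current wall clock extrapolates the video
 * position between refreshes; while paused the last displayed pts is used. */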
969 static double get_video_clock(VideoState *is)
970 {
971     if (is->paused) {
972         return is->video_current_pts;
973     } else {
974         return is->video_current_pts_drift + av_gettime() / 1000000.0;
975     }
976 }
977
978 /* get the current external clock value */
979 static double get_external_clock(VideoState *is)
980 {
981     int64_t ti;
982     ti = av_gettime();
983     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
984 }
985
986 /* get the current master clock value */
987 static double get_master_clock(VideoState *is)
988 {
989     double val;
990
991     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
992         if (is->video_st)
993             val = get_video_clock(is);
994         else
995             val = get_audio_clock(is);
996     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
997         if (is->audio_st)
998             val = get_audio_clock(is);
999         else
1000             val = get_video_clock(is);
1001     } else {
1002         val = get_external_clock(is);
1003     }
1004     return val;
1005 }
1006
1007 /* seek in the stream */
1008 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1009 {
1010     if (!is->seek_req) {
1011         is->seek_pos = pos;
1012         is->seek_rel = rel;
1013         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1014         if (seek_by_bytes)
1015             is->seek_flags |= AVSEEK_FLAG_BYTE;
1016         is->seek_req = 1;
1017     }
1018 }
1019
1020 /* pause or resume the video */
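/* When resuming, frame_timer is pushed forward by the time spent paused
 * (recovered from the pts drift), so the next frame is not considered late
 * and a burst of frame drops right after unpausing is avoided. */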
1021 static void stream_pause(VideoState *is)
1022 {
1023     if (is->paused) {
1024         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1025         if (is->read_pause_return != AVERROR(ENOSYS)) {
1026             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1027         }
1028         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1029     }
1030     is->paused = !is->paused;
1031 }
1032
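/* Compute the absolute time (frame_timer) at which the given frame should be
 * displayed. The nominal delay is the PTS difference from the previous frame;
 * when audio (or an external clock) is the master, a video that lags by more
 * than the sync threshold gets a zero delay (catch up) and a video that leads
 * gets a doubled delay (wait), as long as the error stays below
 * AV_NOSYNC_THRESHOLD. */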
1033 static double compute_target_time(double frame_current_pts, VideoState *is)
1034 {
1035     double delay, sync_threshold, diff;
1036
1037     /* compute nominal delay */
1038     delay = frame_current_pts - is->frame_last_pts;
1039     if (delay <= 0 || delay >= 10.0) {
1040         /* if incorrect delay, use previous one */
1041         delay = is->frame_last_delay;
1042     } else {
1043         is->frame_last_delay = delay;
1044     }
1045     is->frame_last_pts = frame_current_pts;
1046
1047     /* update delay to follow master synchronisation source */
1048     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1049          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1050         /* if video is slave, we try to correct big delays by
1051            duplicating or deleting a frame */
1052         diff = get_video_clock(is) - get_master_clock(is);
1053
1054         /* skip or repeat frame. We take into account the
1055            delay to compute the threshold. I still don't know
1056            if it is the best guess */
1057         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1058         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1059             if (diff <= -sync_threshold)
1060                 delay = 0;
1061             else if (diff >= sync_threshold)
1062                 delay = 2 * delay;
1063         }
1064     }
1065     is->frame_timer += delay;
1066
1067     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1068             delay, frame_current_pts, -diff);
1069
1070     return is->frame_timer;
1071 }
1072
1073 /* called to display each frame */
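/* A picture is shown only once its target_clock has been reached; if the next
 * picture's target time has already passed and framedrop is enabled, the
 * current picture is dropped and skip_frames is increased so the decoder
 * thread also drops more frames upstream. */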
1074 static void video_refresh_timer(void *opaque)
1075 {
1076     VideoState *is = opaque;
1077     VideoPicture *vp;
1078
1079     SubPicture *sp, *sp2;
1080
1081     if (is->video_st) {
1082 retry:
1083         if (is->pictq_size == 0) {
1084             // nothing to do, no picture to display in the queue
1085         } else {
1086             double time = av_gettime() / 1000000.0;
1087             double next_target;
1088             /* dequeue the picture */
1089             vp = &is->pictq[is->pictq_rindex];
1090
1091             if (time < vp->target_clock)
1092                 return;
1093             /* update current video pts */
1094             is->video_current_pts = vp->pts;
1095             is->video_current_pts_drift = is->video_current_pts - time;
1096             is->video_current_pos = vp->pos;
1097             if (is->pictq_size > 1) {
1098                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
1099                 assert(nextvp->target_clock >= vp->target_clock);
1100                 next_target= nextvp->target_clock;
1101             } else {
1102                 next_target = vp->target_clock + is->video_clock - vp->pts; // FIXME pass durations cleanly
1103             }
1104             if (framedrop && time > next_target) {
1105                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1106                 if (is->pictq_size > 1 || time > next_target + 0.5) {
1107                     /* update queue size and signal for next picture */
1108                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1109                         is->pictq_rindex = 0;
1110
1111                     SDL_LockMutex(is->pictq_mutex);
1112                     is->pictq_size--;
1113                     SDL_CondSignal(is->pictq_cond);
1114                     SDL_UnlockMutex(is->pictq_mutex);
1115                     goto retry;
1116                 }
1117             }
1118
1119             if (is->subtitle_st) {
1120                 if (is->subtitle_stream_changed) {
1121                     SDL_LockMutex(is->subpq_mutex);
1122
1123                     while (is->subpq_size) {
1124                         free_subpicture(&is->subpq[is->subpq_rindex]);
1125
1126                         /* update queue size and signal for next picture */
1127                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1128                             is->subpq_rindex = 0;
1129
1130                         is->subpq_size--;
1131                     }
1132                     is->subtitle_stream_changed = 0;
1133
1134                     SDL_CondSignal(is->subpq_cond);
1135                     SDL_UnlockMutex(is->subpq_mutex);
1136                 } else {
1137                     if (is->subpq_size > 0) {
1138                         sp = &is->subpq[is->subpq_rindex];
1139
1140                         if (is->subpq_size > 1)
1141                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1142                         else
1143                             sp2 = NULL;
1144
1145                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1146                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1147                         {
1148                             free_subpicture(sp);
1149
1150                             /* update queue size and signal for next picture */
1151                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1152                                 is->subpq_rindex = 0;
1153
1154                             SDL_LockMutex(is->subpq_mutex);
1155                             is->subpq_size--;
1156                             SDL_CondSignal(is->subpq_cond);
1157                             SDL_UnlockMutex(is->subpq_mutex);
1158                         }
1159                     }
1160                 }
1161             }
1162
1163             /* display picture */
1164             if (!display_disable)
1165                 video_display(is);
1166
1167             /* update queue size and signal for next picture */
1168             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1169                 is->pictq_rindex = 0;
1170
1171             SDL_LockMutex(is->pictq_mutex);
1172             is->pictq_size--;
1173             SDL_CondSignal(is->pictq_cond);
1174             SDL_UnlockMutex(is->pictq_mutex);
1175         }
1176     } else if (is->audio_st) {
1177         /* draw the next audio frame */
1178
1179         /* if only audio stream, then display the audio bars (better
1180            than nothing, just to test the implementation) */
1181
1182         /* display picture */
1183         if (!display_disable)
1184             video_display(is);
1185     }
1186     if (show_status) {
1187         static int64_t last_time;
1188         int64_t cur_time;
1189         int aqsize, vqsize, sqsize;
1190         double av_diff;
1191
1192         cur_time = av_gettime();
1193         if (!last_time || (cur_time - last_time) >= 30000) {
1194             aqsize = 0;
1195             vqsize = 0;
1196             sqsize = 0;
1197             if (is->audio_st)
1198                 aqsize = is->audioq.size;
1199             if (is->video_st)
1200                 vqsize = is->videoq.size;
1201             if (is->subtitle_st)
1202                 sqsize = is->subtitleq.size;
1203             av_diff = 0;
1204             if (is->audio_st && is->video_st)
1205                 av_diff = get_audio_clock(is) - get_video_clock(is);
1206             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1207                    get_master_clock(is), av_diff, FFMAX(is->skip_frames - 1, 0), aqsize / 1024,
1208                    vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1209             fflush(stdout);
1210             last_time = cur_time;
1211         }
1212     }
1213 }
1214
1215 static void stream_close(VideoState *is)
1216 {
1217     VideoPicture *vp;
1218     int i;
1219     /* XXX: use a special url_shutdown call to abort parse cleanly */
1220     is->abort_request = 1;
1221     SDL_WaitThread(is->parse_tid, NULL);
1222     SDL_WaitThread(is->refresh_tid, NULL);
1223
1224     /* free all pictures */
1225     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1226         vp = &is->pictq[i];
1227         if (vp->bmp) {
1228             SDL_FreeYUVOverlay(vp->bmp);
1229             vp->bmp = NULL;
1230         }
1231     }
1232     SDL_DestroyMutex(is->pictq_mutex);
1233     SDL_DestroyCond(is->pictq_cond);
1234     SDL_DestroyMutex(is->subpq_mutex);
1235     SDL_DestroyCond(is->subpq_cond);
1236 #if !CONFIG_AVFILTER
1237     if (is->img_convert_ctx)
1238         sws_freeContext(is->img_convert_ctx);
1239 #endif
1240     av_free(is);
1241 }
1242
1243 static void do_exit(void)
1244 {
1245     if (cur_stream) {
1246         stream_close(cur_stream);
1247         cur_stream = NULL;
1248     }
1249     uninit_opts();
1250     avformat_network_deinit();
1251     if (show_status)
1252         printf("\n");
1253     SDL_Quit();
1254     av_log(NULL, AV_LOG_QUIET, "");
1255     exit(0);
1256 }
1257
1258 /* allocate a picture (this needs to be done in the main thread to avoid
1259    potential locking problems) */
1260 static void alloc_picture(void *opaque)
1261 {
1262     VideoState *is = opaque;
1263     VideoPicture *vp;
1264
1265     vp = &is->pictq[is->pictq_windex];
1266
1267     if (vp->bmp)
1268         SDL_FreeYUVOverlay(vp->bmp);
1269
1270 #if CONFIG_AVFILTER
1271     vp->width   = is->out_video_filter->inputs[0]->w;
1272     vp->height  = is->out_video_filter->inputs[0]->h;
1273     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1274 #else
1275     vp->width   = is->video_st->codec->width;
1276     vp->height  = is->video_st->codec->height;
1277     vp->pix_fmt = is->video_st->codec->pix_fmt;
1278 #endif
1279
1280     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1281                                    SDL_YV12_OVERLAY,
1282                                    screen);
1283     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1284         /* SDL allocates a buffer smaller than requested if the video
1285          * overlay hardware is unable to support the requested size. */
1286         fprintf(stderr, "Error: the video system does not support an image\n"
1287                         "size of %dx%d pixels. Try using -vf \"scale=w:h\"\n"
1288                         "to reduce the image size.\n", vp->width, vp->height );
1289         do_exit();
1290     }
1291
1292     SDL_LockMutex(is->pictq_mutex);
1293     vp->allocated = 1;
1294     SDL_CondSignal(is->pictq_cond);
1295     SDL_UnlockMutex(is->pictq_mutex);
1296 }
1297
1298 /* The 'pts' parameter is the dts of the packet / pts of the frame and
1299  * guessed if not known. */
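/* Convert (or copy) the decoded frame into a free slot of the small
 * VideoPicture ring buffer. If no SDL overlay of the right size exists yet,
 * an FF_ALLOC_EVENT is pushed so the main thread performs the allocation,
 * and this thread waits on pictq_cond until it is done. */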
1300 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1301 {
1302     VideoPicture *vp;
1303 #if CONFIG_AVFILTER
1304     AVPicture pict_src;
1305 #else
1306     int dst_pix_fmt = AV_PIX_FMT_YUV420P;
1307 #endif
1308     /* wait until we have space to put a new picture */
1309     SDL_LockMutex(is->pictq_mutex);
1310
1311     if (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1312         is->skip_frames = FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0 - FRAME_SKIP_FACTOR));
1313
1314     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1315            !is->videoq.abort_request) {
1316         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1317     }
1318     SDL_UnlockMutex(is->pictq_mutex);
1319
1320     if (is->videoq.abort_request)
1321         return -1;
1322
1323     vp = &is->pictq[is->pictq_windex];
1324
1325     /* alloc or resize hardware picture buffer */
1326     if (!vp->bmp || vp->reallocate ||
1327 #if CONFIG_AVFILTER
1328         vp->width  != is->out_video_filter->inputs[0]->w ||
1329         vp->height != is->out_video_filter->inputs[0]->h) {
1330 #else
1331         vp->width != is->video_st->codec->width ||
1332         vp->height != is->video_st->codec->height) {
1333 #endif
1334         SDL_Event event;
1335
1336         vp->allocated  = 0;
1337         vp->reallocate = 0;
1338
1339         /* the allocation must be done in the main thread to avoid
1340            locking problems */
1341         event.type = FF_ALLOC_EVENT;
1342         event.user.data1 = is;
1343         SDL_PushEvent(&event);
1344
1345         /* wait until the picture is allocated */
1346         SDL_LockMutex(is->pictq_mutex);
1347         while (!vp->allocated && !is->videoq.abort_request) {
1348             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1349         }
1350         SDL_UnlockMutex(is->pictq_mutex);
1351
1352         if (is->videoq.abort_request)
1353             return -1;
1354     }
1355
1356     /* if the frame is not skipped, then display it */
1357     if (vp->bmp) {
1358         AVPicture pict = { { 0 } };
1359
1360         /* get a pointer on the bitmap */
1361         SDL_LockYUVOverlay (vp->bmp);
1362
1363         pict.data[0] = vp->bmp->pixels[0];
1364         pict.data[1] = vp->bmp->pixels[2];
1365         pict.data[2] = vp->bmp->pixels[1];
1366
1367         pict.linesize[0] = vp->bmp->pitches[0];
1368         pict.linesize[1] = vp->bmp->pitches[2];
1369         pict.linesize[2] = vp->bmp->pitches[1];
1370
1371 #if CONFIG_AVFILTER
1372         pict_src.data[0] = src_frame->data[0];
1373         pict_src.data[1] = src_frame->data[1];
1374         pict_src.data[2] = src_frame->data[2];
1375
1376         pict_src.linesize[0] = src_frame->linesize[0];
1377         pict_src.linesize[1] = src_frame->linesize[1];
1378         pict_src.linesize[2] = src_frame->linesize[2];
1379
1380         // FIXME use direct rendering
1381         av_picture_copy(&pict, &pict_src,
1382                         vp->pix_fmt, vp->width, vp->height);
1383 #else
1384         av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1385         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1386             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1387             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1388         if (is->img_convert_ctx == NULL) {
1389             fprintf(stderr, "Cannot initialize the conversion context\n");
1390             exit(1);
1391         }
1392         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1393                   0, vp->height, pict.data, pict.linesize);
1394 #endif
1395         /* update the bitmap content */
1396         SDL_UnlockYUVOverlay(vp->bmp);
1397
1398         vp->pts = pts;
1399         vp->pos = pos;
1400
1401         /* now we can update the picture count */
1402         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1403             is->pictq_windex = 0;
1404         SDL_LockMutex(is->pictq_mutex);
1405         vp->target_clock = compute_target_time(vp->pts, is);
1406
1407         is->pictq_size++;
1408         SDL_UnlockMutex(is->pictq_mutex);
1409     }
1410     return 0;
1411 }
1412
1413 /* Compute the exact PTS for the picture if it is omitted in the stream.
1414  * The 'pts1' parameter is the dts of the packet / pts of the frame. */
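/* Each decoded frame advances video_clock by one codec time_base tick plus
 * repeat_pict extra half-ticks (repeated fields, e.g. MPEG-2 pulldown), so
 * frames without a usable PTS still get a monotonically increasing value. */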
1415 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1416 {
1417     double frame_delay, pts;
1418     int ret;
1419
1420     pts = pts1;
1421
1422     if (pts != 0) {
1423         /* update video clock with pts, if present */
1424         is->video_clock = pts;
1425     } else {
1426         pts = is->video_clock;
1427     }
1428     /* update video clock for next frame */
1429     frame_delay = av_q2d(is->video_st->codec->time_base);
1430     /* for MPEG2, the frame can be repeated, so we update the
1431        clock accordingly */
1432     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1433     is->video_clock += frame_delay;
1434
1435     ret = queue_picture(is, src_frame, pts, pos);
1436     av_frame_unref(src_frame);
1437     return ret;
1438 }
1439
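/* Pull the next packet from the video queue and decode it. Returns 1 when a
 * frame was produced and should be displayed, 0 when the packet was a flush
 * packet (seek) or the frame was dropped by the frame-skipping logic, and a
 * negative value when the queue was aborted. */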
1440 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1441 {
1442     int got_picture, i;
1443
1444     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1445         return -1;
1446
1447     if (pkt->data == flush_pkt.data) {
1448         avcodec_flush_buffers(is->video_st->codec);
1449
1450         SDL_LockMutex(is->pictq_mutex);
1451         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1452         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1453             is->pictq[i].target_clock= 0;
1454         }
1455         while (is->pictq_size && !is->videoq.abort_request) {
1456             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1457         }
1458         is->video_current_pos = -1;
1459         SDL_UnlockMutex(is->pictq_mutex);
1460
1461         init_pts_correction(&is->pts_ctx);
1462         is->frame_last_pts = AV_NOPTS_VALUE;
1463         is->frame_last_delay = 0;
1464         is->frame_timer = (double)av_gettime() / 1000000.0;
1465         is->skip_frames = 1;
1466         is->skip_frames_index = 0;
1467         return 0;
1468     }
1469
1470     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1471
1472     if (got_picture) {
1473         if (decoder_reorder_pts == -1) {
1474             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1475         } else if (decoder_reorder_pts) {
1476             *pts = frame->pkt_pts;
1477         } else {
1478             *pts = frame->pkt_dts;
1479         }
1480
1481         if (*pts == AV_NOPTS_VALUE) {
1482             *pts = 0;
1483         }
1484         if (is->video_st->sample_aspect_ratio.num) {
1485             frame->sample_aspect_ratio = is->video_st->sample_aspect_ratio;
1486         }
1487
1488         is->skip_frames_index += 1;
1489         if (is->skip_frames_index >= is->skip_frames) {
1490             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1491             return 1;
1492         }
1493         av_frame_unref(frame);
1494     }
1495     return 0;
1496 }
1497
1498 #if CONFIG_AVFILTER
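/* Build the video filter graph:
 *   buffer source -> [optional user filter chain from -vf] -> format=yuv420p -> buffersink
 * The format filter guarantees the output pixel format matches what the SDL
 * YV12 overlay expects. */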
1499 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1500 {
1501     char sws_flags_str[128];
1502     char buffersrc_args[256];
1503     int ret;
1504     AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;
1505     AVCodecContext *codec = is->video_st->codec;
1506
1507     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1508     graph->scale_sws_opts = av_strdup(sws_flags_str);
1509
1510     snprintf(buffersrc_args, sizeof(buffersrc_args), "%d:%d:%d:%d:%d:%d:%d",
1511              codec->width, codec->height, codec->pix_fmt,
1512              is->video_st->time_base.num, is->video_st->time_base.den,
1513              codec->sample_aspect_ratio.num, codec->sample_aspect_ratio.den);
1514
1515
1516     if ((ret = avfilter_graph_create_filter(&filt_src,
1517                                             avfilter_get_by_name("buffer"),
1518                                             "src", buffersrc_args, NULL,
1519                                             graph)) < 0)
1520         return ret;
1521     if ((ret = avfilter_graph_create_filter(&filt_out,
1522                                             avfilter_get_by_name("buffersink"),
1523                                             "out", NULL, NULL, graph)) < 0)
1524         return ret;
1525
1526     if ((ret = avfilter_graph_create_filter(&filt_format,
1527                                             avfilter_get_by_name("format"),
1528                                             "format", "yuv420p", NULL, graph)) < 0)
1529         return ret;
1530     if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
1531         return ret;
1532
1533
1534     if (vfilters) {
1535         AVFilterInOut *outputs = avfilter_inout_alloc();
1536         AVFilterInOut *inputs  = avfilter_inout_alloc();
1537
1538         outputs->name    = av_strdup("in");
1539         outputs->filter_ctx = filt_src;
1540         outputs->pad_idx = 0;
1541         outputs->next    = NULL;
1542
1543         inputs->name    = av_strdup("out");
1544         inputs->filter_ctx = filt_format;
1545         inputs->pad_idx = 0;
1546         inputs->next    = NULL;
1547
1548         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1549             return ret;
1550     } else {
1551         if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
1552             return ret;
1553     }
1554
1555     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1556         return ret;
1557
1558     is->in_video_filter  = filt_src;
1559     is->out_video_filter = filt_out;
1560
1561     return ret;
1562 }
1563
1564 #endif  /* CONFIG_AVFILTER */
1565
1566 static int video_thread(void *arg)
1567 {
1568     AVPacket pkt = { 0 };
1569     VideoState *is = arg;
1570     AVFrame *frame = av_frame_alloc();
1571     int64_t pts_int;
1572     double pts;
1573     int ret;
1574
1575 #if CONFIG_AVFILTER
1576     AVFilterGraph *graph = avfilter_graph_alloc();
1577     AVFilterContext *filt_out = NULL, *filt_in = NULL;
1578     int last_w = is->video_st->codec->width;
1579     int last_h = is->video_st->codec->height;
1580
1581     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1582         goto the_end;
1583     filt_in  = is->in_video_filter;
1584     filt_out = is->out_video_filter;
1585 #endif
1586
1587     for (;;) {
1588 #if CONFIG_AVFILTER
1589         AVRational tb;
1590 #endif
1591         while (is->paused && !is->videoq.abort_request)
1592             SDL_Delay(10);
1593
1594         av_free_packet(&pkt);
1595
1596         ret = get_video_frame(is, frame, &pts_int, &pkt);
1597         if (ret < 0)
1598             goto the_end;
1599
1600         if (!ret)
1601             continue;
1602
1603 #if CONFIG_AVFILTER
1604         if (   last_w != is->video_st->codec->width
1605             || last_h != is->video_st->codec->height) {
1606             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1607                     is->video_st->codec->width, is->video_st->codec->height);
1608             avfilter_graph_free(&graph);
1609             graph = avfilter_graph_alloc();
1610             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1611                 goto the_end;
1612             filt_in  = is->in_video_filter;
1613             filt_out = is->out_video_filter;
1614             last_w = is->video_st->codec->width;
1615             last_h = is->video_st->codec->height;
1616         }
1617
1618         frame->pts = pts_int;
1619         ret = av_buffersrc_add_frame(filt_in, frame);
1620         if (ret < 0)
1621             goto the_end;
1622
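         /* one input frame may yield zero or more filtered frames; pull from the sink
          * until it reports that nothing more is available, rescaling each pts from
          * the filter output time base to the stream time base */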
1623         while (ret >= 0) {
1624             ret = av_buffersink_get_frame(filt_out, frame);
1625             if (ret < 0) {
1626                 ret = 0;
1627                 break;
1628             }
1629
1630             pts_int = frame->pts;
1631             tb      = filt_out->inputs[0]->time_base;
1632             if (av_cmp_q(tb, is->video_st->time_base)) {
1633                 av_unused int64_t pts1 = pts_int;
1634                 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1635                 av_dlog(NULL, "video_thread(): "
1636                         "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1637                         tb.num, tb.den, pts1,
1638                         is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1639             }
1640             pts = pts_int * av_q2d(is->video_st->time_base);
1641             ret = output_picture2(is, frame, pts, 0);
1642         }
1643 #else
1644         pts = pts_int * av_q2d(is->video_st->time_base);
1645         ret = output_picture2(is, frame, pts,  pkt.pos);
1646 #endif
1647
1648         if (ret < 0)
1649             goto the_end;
1650
1651
1652         if (step)
1653             if (cur_stream)
1654                 stream_pause(cur_stream);
1655     }
1656  the_end:
1657 #if CONFIG_AVFILTER
1658     av_freep(&vfilters);
1659     avfilter_graph_free(&graph);
1660 #endif
1661     av_free_packet(&pkt);
1662     av_frame_free(&frame);
1663     return 0;
1664 }
1665
1666 static int subtitle_thread(void *arg)
1667 {
1668     VideoState *is = arg;
1669     SubPicture *sp;
1670     AVPacket pkt1, *pkt = &pkt1;
1671     int got_subtitle;
1672     double pts;
1673     int i, j;
1674     int r, g, b, y, u, v, a;
1675
1676     for (;;) {
1677         while (is->paused && !is->subtitleq.abort_request) {
1678             SDL_Delay(10);
1679         }
1680         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1681             break;
1682
1683         if (pkt->data == flush_pkt.data) {
1684             avcodec_flush_buffers(is->subtitle_st->codec);
1685             continue;
1686         }
1687         SDL_LockMutex(is->subpq_mutex);
1688         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1689                !is->subtitleq.abort_request) {
1690             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1691         }
1692         SDL_UnlockMutex(is->subpq_mutex);
1693
1694         if (is->subtitleq.abort_request)
1695             return 0;
1696
1697         sp = &is->subpq[is->subpq_windex];
1698
1699         /* NOTE: pts is the PTS of the _first_ subtitle picture beginning
1700            in this packet, if any */
1701         pts = 0;
1702         if (pkt->pts != AV_NOPTS_VALUE)
1703             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
1704
1705         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1706                                  &got_subtitle, pkt);
1707
1708         if (got_subtitle && sp->sub.format == 0) {
1709             sp->pts = pts;
1710
1711             for (i = 0; i < sp->sub.num_rects; i++)
1712             {
1713                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1714                 {
1715                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1716                     y = RGB_TO_Y_CCIR(r, g, b);
1717                     u = RGB_TO_U_CCIR(r, g, b, 0);
1718                     v = RGB_TO_V_CCIR(r, g, b, 0);
1719                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1720                 }
1721             }
1722
1723             /* now we can update the picture count */
1724             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1725                 is->subpq_windex = 0;
1726             SDL_LockMutex(is->subpq_mutex);
1727             is->subpq_size++;
1728             SDL_UnlockMutex(is->subpq_mutex);
1729         }
1730         av_free_packet(pkt);
1731     }
1732     return 0;
1733 }
1734
1735 /* copy decoded samples into the circular buffer used by the audio wave/spectrum display */
1736 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1737 {
1738     int size, len;
1739
1740     size = samples_size / sizeof(short);
1741     while (size > 0) {
1742         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1743         if (len > size)
1744             len = size;
1745         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1746         samples += len;
1747         is->sample_array_index += len;
1748         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1749             is->sample_array_index = 0;
1750         size -= len;
1751     }
1752 }
1753
1754 /* return the new audio buffer size (samples may be added or removed
1755    to improve sync when video or an external clock is the master) */
1756 static int synchronize_audio(VideoState *is, short *samples,
1757                              int samples_size1, double pts)
1758 {
1759     int n, samples_size;
1760     double ref_clock;
1761
1762     n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1763     samples_size = samples_size1;
1764
1765     /* if audio is not the master clock, try to add or remove samples to correct the drift */
1766     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1767          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1768         double diff, avg_diff;
1769         int wanted_size, min_size, max_size, nb_samples;
1770
1771         ref_clock = get_master_clock(is);
1772         diff = get_audio_clock(is) - ref_clock;
1773
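         /* audio_diff_cum is a geometrically weighted sum of past A-V differences:
          * cum = diff + coef * cum_prev, so avg_diff = cum * (1 - coef) acts as a
          * moving average in which, with coef = 0.01^(1/AUDIO_DIFF_AVG_NB), the last
          * AUDIO_DIFF_AVG_NB measurements carry roughly 99% of the weight */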
1774         if (diff < AV_NOSYNC_THRESHOLD) {
1775             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1776             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1777                 /* not enough measures to have a correct estimate */
1778                 is->audio_diff_avg_count++;
1779             } else {
1780                 /* estimate the A-V difference */
1781                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1782
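                 /* translate the current clock error into bytes:
                  * diff (seconds) * sample_rate (frames/s) * n (bytes/frame), then
                  * clamp the correction to +-SAMPLE_CORRECTION_PERCENT_MAX% of the
                  * buffer so the playback speed change stays small */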
1783                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1784                     wanted_size = samples_size + ((int)(diff * is->sdl_sample_rate) * n);
1785                     nb_samples = samples_size / n;
1786
1787                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1788                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1789                     if (wanted_size < min_size)
1790                         wanted_size = min_size;
1791                     else if (wanted_size > max_size)
1792                         wanted_size = max_size;
1793
1794                     /* add or remove samples to correct the sync */
1795                     if (wanted_size < samples_size) {
1796                         /* remove samples */
1797                         samples_size = wanted_size;
1798                     } else if (wanted_size > samples_size) {
1799                         uint8_t *samples_end, *q;
1800                         int nb;
1801
1802                         /* add samples */
1803                         nb = wanted_size - samples_size;
1804                         samples_end = (uint8_t *)samples + samples_size - n;
1805                         q = samples_end + n;
1806                         while (nb > 0) {
1807                             memcpy(q, samples_end, n);
1808                             q += n;
1809                             nb -= n;
1810                         }
1811                         samples_size = wanted_size;
1812                     }
1813                 }
1814                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1815                         diff, avg_diff, samples_size - samples_size1,
1816                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
1817             }
1818         } else {
1819             /* difference too large: probably initial PTS errors, so
1820                reset the A-V filter */
1821             is->audio_diff_avg_count = 0;
1822             is->audio_diff_cum       = 0;
1823         }
1824     }
1825
1826     return samples_size;
1827 }
1828
1829 /* decode one audio frame and return its uncompressed size */
1830 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1831 {
1832     AVPacket *pkt_temp = &is->audio_pkt_temp;
1833     AVPacket *pkt = &is->audio_pkt;
1834     AVCodecContext *dec = is->audio_st->codec;
1835     int n, len1, data_size, got_frame;
1836     double pts;
1837     int new_packet = 0;
1838     int flush_complete = 0;
1839
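     /* outer loop: fetch the next packet from the audio queue; inner loop: a packet
      * may contain several frames, so keep decoding until its data is consumed.
      * Once empty packets arrive (data == NULL, queued at EOF), a decoder with
      * CODEC_CAP_DELAY is drained until it stops returning frames. */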
1840     for (;;) {
1841         /* NOTE: the audio packet can contain several frames */
1842         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
1843             int resample_changed, audio_resample;
1844
1845             if (!is->frame) {
1846                 if (!(is->frame = avcodec_alloc_frame()))
1847                     return AVERROR(ENOMEM);
1848             } else
1849                 avcodec_get_frame_defaults(is->frame);
1850
1851             if (flush_complete)
1852                 break;
1853             new_packet = 0;
1854             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
1855             if (len1 < 0) {
1856                 /* on decode error, discard the rest of the packet */
1857                 pkt_temp->size = 0;
1858                 break;
1859             }
1860
1861             pkt_temp->data += len1;
1862             pkt_temp->size -= len1;
1863
1864             if (!got_frame) {
1865                 /* stop sending empty packets if the decoder is finished */
1866                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
1867                     flush_complete = 1;
1868                 continue;
1869             }
1870             data_size = av_samples_get_buffer_size(NULL, dec->channels,
1871                                                    is->frame->nb_samples,
1872                                                    is->frame->format, 1);
1873
1874             audio_resample = is->frame->format         != is->sdl_sample_fmt     ||
1875                              is->frame->channel_layout != is->sdl_channel_layout ||
1876                              is->frame->sample_rate    != is->sdl_sample_rate;
1877
1878             resample_changed = is->frame->format         != is->resample_sample_fmt     ||
1879                                is->frame->channel_layout != is->resample_channel_layout ||
1880                                is->frame->sample_rate    != is->resample_sample_rate;
1881
1882             if ((!is->avr && audio_resample) || resample_changed) {
1883                 int ret;
1884                 if (is->avr)
1885                     avresample_close(is->avr);
1886                 else if (audio_resample) {
1887                     is->avr = avresample_alloc_context();
1888                     if (!is->avr) {
1889                         fprintf(stderr, "error allocating AVAudioResampleContext\n");
1890                         break;
1891                     }
1892                 }
1893                 if (audio_resample) {
1894                     av_opt_set_int(is->avr, "in_channel_layout",  is->frame->channel_layout, 0);
1895                     av_opt_set_int(is->avr, "in_sample_fmt",      is->frame->format,         0);
1896                     av_opt_set_int(is->avr, "in_sample_rate",     is->frame->sample_rate,    0);
1897                     av_opt_set_int(is->avr, "out_channel_layout", is->sdl_channel_layout,    0);
1898                     av_opt_set_int(is->avr, "out_sample_fmt",     is->sdl_sample_fmt,        0);
1899                     av_opt_set_int(is->avr, "out_sample_rate",    is->sdl_sample_rate,       0);
1900
1901                     if ((ret = avresample_open(is->avr)) < 0) {
1902                         fprintf(stderr, "error initializing libavresample\n");
1903                         break;
1904                     }
1905                 }
1906                 is->resample_sample_fmt     = is->frame->format;
1907                 is->resample_channel_layout = is->frame->channel_layout;
1908                 is->resample_sample_rate    = is->frame->sample_rate;
1909             }
1910
1911             if (audio_resample) {
1912                 void *tmp_out;
1913                 int out_samples, out_size, out_linesize;
1914                 int osize      = av_get_bytes_per_sample(is->sdl_sample_fmt);
1915                 int nb_samples = is->frame->nb_samples;
1916
1917                 out_size = av_samples_get_buffer_size(&out_linesize,
1918                                                       is->sdl_channels,
1919                                                       nb_samples,
1920                                                       is->sdl_sample_fmt, 0);
1921                 tmp_out = av_realloc(is->audio_buf1, out_size);
1922                 if (!tmp_out)
1923                     return AVERROR(ENOMEM);
1924                 is->audio_buf1 = tmp_out;
1925
1926                 out_samples = avresample_convert(is->avr,
1927                                                  &is->audio_buf1,
1928                                                  out_linesize, nb_samples,
1929                                                  is->frame->data,
1930                                                  is->frame->linesize[0],
1931                                                  is->frame->nb_samples);
1932                 if (out_samples < 0) {
1933                     fprintf(stderr, "avresample_convert() failed\n");
1934                     break;
1935                 }
1936                 is->audio_buf = is->audio_buf1;
1937                 data_size = out_samples * osize * is->sdl_channels;
1938             } else {
1939                 is->audio_buf = is->frame->data[0];
1940             }
1941
1942             /* the running audio clock provides this frame's pts; advance it by the frame duration */
1943             pts = is->audio_clock;
1944             *pts_ptr = pts;
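             /* this frame advances the audio clock by
              * data_size / (channels * bytes_per_sample * sample_rate) seconds */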
1945             n = is->sdl_channels * av_get_bytes_per_sample(is->sdl_sample_fmt);
1946             is->audio_clock += (double)data_size /
1947                 (double)(n * is->sdl_sample_rate);
1948 #ifdef DEBUG
1949             {
1950                 static double last_clock;
1951                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1952                        is->audio_clock - last_clock,
1953                        is->audio_clock, pts);
1954                 last_clock = is->audio_clock;
1955             }
1956 #endif
1957             return data_size;
1958         }
1959
1960         /* free the current packet */
1961         if (pkt->data)
1962             av_free_packet(pkt);
1963         memset(pkt_temp, 0, sizeof(*pkt_temp));
1964
1965         if (is->paused || is->audioq.abort_request) {
1966             return -1;
1967         }
1968
1969         /* read next packet */
1970         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
1971             return -1;
1972
1973         if (pkt->data == flush_pkt.data) {
1974             avcodec_flush_buffers(dec);
1975             flush_complete = 0;
1976         }
1977
1978         *pkt_temp = *pkt;
1979
1980         /* update the audio clock with the packet pts, if available */
1981         if (pkt->pts != AV_NOPTS_VALUE) {
1982             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1983         }
1984     }
1985 }
1986
1987 /* prepare a new audio buffer */
1988 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1989 {
1990     VideoState *is = opaque;
1991     int audio_size, len1;
1992     double pts;
1993
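     /* SDL calls this from its audio thread whenever it needs len more bytes;
      * frames are decoded into audio_buf and copied out in slices, and on decode
      * failure silence is substituted so the device keeps getting data */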
1994     audio_callback_time = av_gettime();
1995
1996     while (len > 0) {
1997         if (is->audio_buf_index >= is->audio_buf_size) {
1998            audio_size = audio_decode_frame(is, &pts);
1999            if (audio_size < 0) {
2000                 /* if error, just output silence */
2001                is->audio_buf      = is->silence_buf;
2002                is->audio_buf_size = sizeof(is->silence_buf);
2003            } else {
2004                if (is->show_audio)
2005                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2006                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2007                                               pts);
2008                is->audio_buf_size = audio_size;
2009            }
2010            is->audio_buf_index = 0;
2011         }
2012         len1 = is->audio_buf_size - is->audio_buf_index;
2013         if (len1 > len)
2014             len1 = len;
2015         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2016         len -= len1;
2017         stream += len1;
2018         is->audio_buf_index += len1;
2019     }
2020 }
2021
2022 /* open a given stream. Return 0 if OK */
2023 static int stream_component_open(VideoState *is, int stream_index)
2024 {
2025     AVFormatContext *ic = is->ic;
2026     AVCodecContext *avctx;
2027     AVCodec *codec;
2028     SDL_AudioSpec wanted_spec, spec;
2029     AVDictionary *opts;
2030     AVDictionaryEntry *t = NULL;
2031
2032     if (stream_index < 0 || stream_index >= ic->nb_streams)
2033         return -1;
2034     avctx = ic->streams[stream_index]->codec;
2035
2036     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], NULL);
2037
2038     codec = avcodec_find_decoder(avctx->codec_id);
2039     avctx->debug_mv          = debug_mv;
2040     avctx->workaround_bugs   = workaround_bugs;
2041     avctx->idct_algo         = idct;
2042     avctx->skip_frame        = skip_frame;
2043     avctx->skip_idct         = skip_idct;
2044     avctx->skip_loop_filter  = skip_loop_filter;
2045     avctx->error_concealment = error_concealment;
2046
2047     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
2048
2049     if (!av_dict_get(opts, "threads", NULL, 0))
2050         av_dict_set(&opts, "threads", "auto", 0);
2051     if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
2052         av_dict_set(&opts, "refcounted_frames", "1", 0);
2053     if (!codec ||
2054         avcodec_open2(avctx, codec, &opts) < 0)
2055         return -1;
2056     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2057         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2058         return AVERROR_OPTION_NOT_FOUND;
2059     }
2060
2061     /* prepare audio output */
2062     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2063         is->sdl_sample_rate = avctx->sample_rate;
2064
2065         if (!avctx->channel_layout)
2066             avctx->channel_layout = av_get_default_channel_layout(avctx->channels);
2067         if (!avctx->channel_layout) {
2068             fprintf(stderr, "unable to guess channel layout\n");
2069             return -1;
2070         }
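         /* SDL output is limited here to mono or stereo; any other source layout
          * is remixed to the chosen layout by libavresample before playback */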
2071         if (avctx->channels == 1)
2072             is->sdl_channel_layout = AV_CH_LAYOUT_MONO;
2073         else
2074             is->sdl_channel_layout = AV_CH_LAYOUT_STEREO;
2075         is->sdl_channels = av_get_channel_layout_nb_channels(is->sdl_channel_layout);
2076
2077         wanted_spec.format = AUDIO_S16SYS;
2078         wanted_spec.freq = is->sdl_sample_rate;
2079         wanted_spec.channels = is->sdl_channels;
2080         wanted_spec.silence = 0;
2081         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2082         wanted_spec.callback = sdl_audio_callback;
2083         wanted_spec.userdata = is;
2084         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2085             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2086             return -1;
2087         }
2088         is->audio_hw_buf_size = spec.size;
2089         is->sdl_sample_fmt          = AV_SAMPLE_FMT_S16;
2090         is->resample_sample_fmt     = is->sdl_sample_fmt;
2091         is->resample_channel_layout = avctx->channel_layout;
2092         is->resample_sample_rate    = avctx->sample_rate;
2093     }
2094
2095     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2096     switch (avctx->codec_type) {
2097     case AVMEDIA_TYPE_AUDIO:
2098         is->audio_stream = stream_index;
2099         is->audio_st = ic->streams[stream_index];
2100         is->audio_buf_size  = 0;
2101         is->audio_buf_index = 0;
2102
2103         /* init averaging filter */
2104         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2105         is->audio_diff_avg_count = 0;
2106         /* since we do not have precise enough audio FIFO fullness information, we
2107            only correct audio sync above this threshold (about two SDL buffers, in seconds) */
2108         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2109
2110         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2111         packet_queue_init(&is->audioq);
2112         SDL_PauseAudio(0);
2113         break;
2114     case AVMEDIA_TYPE_VIDEO:
2115         is->video_stream = stream_index;
2116         is->video_st = ic->streams[stream_index];
2117
2118         packet_queue_init(&is->videoq);
2119         is->video_tid = SDL_CreateThread(video_thread, is);
2120         break;
2121     case AVMEDIA_TYPE_SUBTITLE:
2122         is->subtitle_stream = stream_index;
2123         is->subtitle_st = ic->streams[stream_index];
2124         packet_queue_init(&is->subtitleq);
2125
2126         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2127         break;
2128     default:
2129         break;
2130     }
2131     return 0;
2132 }
2133
2134 static void stream_component_close(VideoState *is, int stream_index)
2135 {
2136     AVFormatContext *ic = is->ic;
2137     AVCodecContext *avctx;
2138
2139     if (stream_index < 0 || stream_index >= ic->nb_streams)
2140         return;
2141     avctx = ic->streams[stream_index]->codec;
2142
2143     switch (avctx->codec_type) {
2144     case AVMEDIA_TYPE_AUDIO:
2145         packet_queue_abort(&is->audioq);
2146
2147         SDL_CloseAudio();
2148
2149         packet_queue_end(&is->audioq);
2150         av_free_packet(&is->audio_pkt);
2151         if (is->avr)
2152             avresample_free(&is->avr);
2153         av_freep(&is->audio_buf1);
2154         is->audio_buf = NULL;
2155         avcodec_free_frame(&is->frame);
2156
2157         if (is->rdft) {
2158             av_rdft_end(is->rdft);
2159             av_freep(&is->rdft_data);
2160             is->rdft = NULL;
2161             is->rdft_bits = 0;
2162         }
2163         break;
2164     case AVMEDIA_TYPE_VIDEO:
2165         packet_queue_abort(&is->videoq);
2166
2167         /* note: we also signal this mutex to make sure we deblock the
2168            video thread in all cases */
2169         SDL_LockMutex(is->pictq_mutex);
2170         SDL_CondSignal(is->pictq_cond);
2171         SDL_UnlockMutex(is->pictq_mutex);
2172
2173         SDL_WaitThread(is->video_tid, NULL);
2174
2175         packet_queue_end(&is->videoq);
2176         break;
2177     case AVMEDIA_TYPE_SUBTITLE:
2178         packet_queue_abort(&is->subtitleq);
2179
2180         /* note: we also signal this mutex to make sure we deblock the
2181            subtitle thread in all cases */
2182         SDL_LockMutex(is->subpq_mutex);
2183         is->subtitle_stream_changed = 1;
2184
2185         SDL_CondSignal(is->subpq_cond);
2186         SDL_UnlockMutex(is->subpq_mutex);
2187
2188         SDL_WaitThread(is->subtitle_tid, NULL);
2189
2190         packet_queue_end(&is->subtitleq);
2191         break;
2192     default:
2193         break;
2194     }
2195
2196     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2197     avcodec_close(avctx);
2198     switch (avctx->codec_type) {
2199     case AVMEDIA_TYPE_AUDIO:
2200         is->audio_st = NULL;
2201         is->audio_stream = -1;
2202         break;
2203     case AVMEDIA_TYPE_VIDEO:
2204         is->video_st = NULL;
2205         is->video_stream = -1;
2206         break;
2207     case AVMEDIA_TYPE_SUBTITLE:
2208         is->subtitle_st = NULL;
2209         is->subtitle_stream = -1;
2210         break;
2211     default:
2212         break;
2213     }
2214 }
2215
2216 /* since we have only one decoding thread, we can use a global
2217    variable instead of a thread local variable */
2218 static VideoState *global_video_state;
2219
2220 static int decode_interrupt_cb(void *ctx)
2221 {
2222     return global_video_state && global_video_state->abort_request;
2223 }
2224
2225 /* this thread gets the stream from the disk or the network */
2226 static int decode_thread(void *arg)
2227 {
2228     VideoState *is = arg;
2229     AVFormatContext *ic = NULL;
2230     int err, i, ret;
2231     int st_index[AVMEDIA_TYPE_NB];
2232     AVPacket pkt1, *pkt = &pkt1;
2233     int eof = 0;
2234     int pkt_in_play_range = 0;
2235     AVDictionaryEntry *t;
2236     AVDictionary **opts;
2237     int orig_nb_streams;
2238
2239     memset(st_index, -1, sizeof(st_index));
2240     is->video_stream = -1;
2241     is->audio_stream = -1;
2242     is->subtitle_stream = -1;
2243
2244     global_video_state = is;
2245
2246     ic = avformat_alloc_context();
2247     ic->interrupt_callback.callback = decode_interrupt_cb;
2248     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2249     if (err < 0) {
2250         print_error(is->filename, err);
2251         ret = -1;
2252         goto fail;
2253     }
2254     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2255         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2256         ret = AVERROR_OPTION_NOT_FOUND;
2257         goto fail;
2258     }
2259     is->ic = ic;
2260
2261     if (genpts)
2262         ic->flags |= AVFMT_FLAG_GENPTS;
2263
2264     opts = setup_find_stream_info_opts(ic, codec_opts);
2265     orig_nb_streams = ic->nb_streams;
2266
2267     err = avformat_find_stream_info(ic, opts);
2268     if (err < 0) {
2269         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2270         ret = -1;
2271         goto fail;
2272     }
2273     for (i = 0; i < orig_nb_streams; i++)
2274         av_dict_free(&opts[i]);
2275     av_freep(&opts);
2276
2277     if (ic->pb)
2278         ic->pb->eof_reached = 0; // FIXME hack, avplay maybe should not use url_feof() to test for the end
2279
2280     if (seek_by_bytes < 0)
2281         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2282
2283     /* if seeking was requested, execute it now */
2284     if (start_time != AV_NOPTS_VALUE) {
2285         int64_t timestamp;
2286
2287         timestamp = start_time;
2288         /* add the stream start time */
2289         if (ic->start_time != AV_NOPTS_VALUE)
2290             timestamp += ic->start_time;
2291         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2292         if (ret < 0) {
2293             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2294                     is->filename, (double)timestamp / AV_TIME_BASE);
2295         }
2296     }
2297
2298     for (i = 0; i < ic->nb_streams; i++)
2299         ic->streams[i]->discard = AVDISCARD_ALL;
2300     if (!video_disable)
2301         st_index[AVMEDIA_TYPE_VIDEO] =
2302             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2303                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2304     if (!audio_disable)
2305         st_index[AVMEDIA_TYPE_AUDIO] =
2306             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2307                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2308                                 st_index[AVMEDIA_TYPE_VIDEO],
2309                                 NULL, 0);
2310     if (!video_disable)
2311         st_index[AVMEDIA_TYPE_SUBTITLE] =
2312             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2313                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2314                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2315                                  st_index[AVMEDIA_TYPE_AUDIO] :
2316                                  st_index[AVMEDIA_TYPE_VIDEO]),
2317                                 NULL, 0);
2318     if (show_status) {
2319         av_dump_format(ic, 0, is->filename, 0);
2320     }
2321
2322     /* open the streams */
2323     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2324         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2325     }
2326
2327     ret = -1;
2328     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2329         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2330     }
2331     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2332     if (ret < 0) {
2333         if (!display_disable)
2334             is->show_audio = 2;
2335     }
2336
2337     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2338         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2339     }
2340
2341     if (is->video_stream < 0 && is->audio_stream < 0) {
2342         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2343         ret = -1;
2344         goto fail;
2345     }
2346
2347     for (;;) {
2348         if (is->abort_request)
2349             break;
2350         if (is->paused != is->last_paused) {
2351             is->last_paused = is->paused;
2352             if (is->paused)
2353                 is->read_pause_return = av_read_pause(ic);
2354             else
2355                 av_read_play(ic);
2356         }
2357 #if CONFIG_RTSP_DEMUXER
2358         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2359             /* wait 10 ms to avoid trying to get another packet */
2360             /* XXX: horrible */
2361             SDL_Delay(10);
2362             continue;
2363         }
2364 #endif
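         /* a successful seek flushes every packet queue and then queues the special
          * flush_pkt; when a decoder thread pops it, it calls avcodec_flush_buffers()
          * so no stale frames survive the seek */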
2365         if (is->seek_req) {
2366             int64_t seek_target = is->seek_pos;
2367             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2368             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2369 // FIXME the +-2 is because rounding is not done in the correct direction when
2370 //      the seek_pos/seek_rel variables are generated
2371
2372             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2373             if (ret < 0) {
2374                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2375             } else {
2376                 if (is->audio_stream >= 0) {
2377                     packet_queue_flush(&is->audioq);
2378                     packet_queue_put(&is->audioq, &flush_pkt);
2379                 }
2380                 if (is->subtitle_stream >= 0) {
2381                     packet_queue_flush(&is->subtitleq);
2382                     packet_queue_put(&is->subtitleq, &flush_pkt);
2383                 }
2384                 if (is->video_stream >= 0) {
2385                     packet_queue_flush(&is->videoq);
2386                     packet_queue_put(&is->videoq, &flush_pkt);
2387                 }
2388             }
2389             is->seek_req = 0;
2390             eof = 0;
2391         }
2392
2393         /* if the queues are full, no need to read more */
2394         if (!infinite_buffer &&
2395               (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2396             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
2397                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
2398                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0)))) {
2399             /* wait 10 ms */
2400             SDL_Delay(10);
2401             continue;
2402         }
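         /* at EOF, queue empty packets so decoders that buffer frames (CODEC_CAP_DELAY)
          * can return what they still hold; once every queue has drained, either loop
          * from the start or exit if -autoexit was given */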
2403         if (eof) {
2404             if (is->video_stream >= 0) {
2405                 av_init_packet(pkt);
2406                 pkt->data = NULL;
2407                 pkt->size = 0;
2408                 pkt->stream_index = is->video_stream;
2409                 packet_queue_put(&is->videoq, pkt);
2410             }
2411             if (is->audio_stream >= 0 &&
2412                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
2413                 av_init_packet(pkt);
2414                 pkt->data = NULL;
2415                 pkt->size = 0;
2416                 pkt->stream_index = is->audio_stream;
2417                 packet_queue_put(&is->audioq, pkt);
2418             }
2419             SDL_Delay(10);
2420             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
2421                 if (loop != 1 && (!loop || --loop)) {
2422                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2423                 } else if (autoexit) {
2424                     ret = AVERROR_EOF;
2425                     goto fail;
2426                 }
2427             }
2428             continue;
2429         }
2430         ret = av_read_frame(ic, pkt);
2431         if (ret < 0) {
2432             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2433                 eof = 1;
2434             if (ic->pb && ic->pb->error)
2435                 break;
2436             SDL_Delay(100); /* wait for user event */
2437             continue;
2438         }
2439         /* check if packet is in play range specified by user, then queue, otherwise discard */
2440         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2441                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2442                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2443                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
2444                 <= ((double)duration / 1000000);
2445         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2446             packet_queue_put(&is->audioq, pkt);
2447         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2448             packet_queue_put(&is->videoq, pkt);
2449         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2450             packet_queue_put(&is->subtitleq, pkt);
2451         } else {
2452             av_free_packet(pkt);
2453         }
2454     }
2455     /* wait until the end */
2456     while (!is->abort_request) {
2457         SDL_Delay(100);
2458     }
2459
2460     ret = 0;
2461  fail:
2462     /* disable interrupting */
2463     global_video_state = NULL;
2464
2465     /* close each stream */
2466     if (is->audio_stream >= 0)
2467         stream_component_close(is, is->audio_stream);
2468     if (is->video_stream >= 0)
2469         stream_component_close(is, is->video_stream);
2470     if (is->subtitle_stream >= 0)
2471         stream_component_close(is, is->subtitle_stream);
2472     if (is->ic) {
2473         avformat_close_input(&is->ic);
2474     }
2475
2476     if (ret != 0) {
2477         SDL_Event event;
2478
2479         event.type = FF_QUIT_EVENT;
2480         event.user.data1 = is;
2481         SDL_PushEvent(&event);
2482     }
2483     return 0;
2484 }
2485
2486 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2487 {
2488     VideoState *is;
2489
2490     is = av_mallocz(sizeof(VideoState));
2491     if (!is)
2492         return NULL;
2493     av_strlcpy(is->filename, filename, sizeof(is->filename));
2494     is->iformat = iformat;
2495     is->ytop    = 0;
2496     is->xleft   = 0;
2497
2498     /* start video display */
2499     is->pictq_mutex = SDL_CreateMutex();
2500     is->pictq_cond  = SDL_CreateCond();
2501
2502     is->subpq_mutex = SDL_CreateMutex();
2503     is->subpq_cond  = SDL_CreateCond();
2504
2505     is->av_sync_type = av_sync_type;
2506     is->parse_tid    = SDL_CreateThread(decode_thread, is);
2507     if (!is->parse_tid) {
2508         av_free(is);
2509         return NULL;
2510     }
2511     return is;
2512 }
2513
2514 static void stream_cycle_channel(VideoState *is, int codec_type)
2515 {
2516     AVFormatContext *ic = is->ic;
2517     int start_index, stream_index;
2518     AVStream *st;
2519
2520     if (codec_type == AVMEDIA_TYPE_VIDEO)
2521         start_index = is->video_stream;
2522     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2523         start_index = is->audio_stream;
2524     else
2525         start_index = is->subtitle_stream;
2526     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2527         return;
2528     stream_index = start_index;
2529     for (;;) {
2530         if (++stream_index >= is->ic->nb_streams)
2531         {
2532             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2533             {
2534                 stream_index = -1;
2535                 goto the_end;
2536             } else
2537                 stream_index = 0;
2538         }
2539         if (stream_index == start_index)
2540             return;
2541         st = ic->streams[stream_index];
2542         if (st->codec->codec_type == codec_type) {
2543             /* check that parameters are OK */
2544             switch (codec_type) {
2545             case AVMEDIA_TYPE_AUDIO:
2546                 if (st->codec->sample_rate != 0 &&
2547                     st->codec->channels != 0)
2548                     goto the_end;
2549                 break;
2550             case AVMEDIA_TYPE_VIDEO:
2551             case AVMEDIA_TYPE_SUBTITLE:
2552                 goto the_end;
2553             default:
2554                 break;
2555             }
2556         }
2557     }
2558  the_end:
2559     stream_component_close(is, start_index);
2560     stream_component_open(is, stream_index);
2561 }
2562
2563
2564 static void toggle_full_screen(void)
2565 {
2566 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
2567     /* OS X needs to empty the picture_queue */
2568     int i;
2569     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
2570         cur_stream->pictq[i].reallocate = 1;
2571 #endif
2572     is_full_screen = !is_full_screen;
2573     video_open(cur_stream);
2574 }
2575
2576 static void toggle_pause(void)
2577 {
2578     if (cur_stream)
2579         stream_pause(cur_stream);
2580     step = 0;
2581 }
2582
2583 static void step_to_next_frame(void)
2584 {
2585     if (cur_stream) {
2586         /* if the stream is paused, unpause it, then step */
2587         if (cur_stream->paused)
2588             stream_pause(cur_stream);
2589     }
2590     step = 1;
2591 }
2592
2593 static void toggle_audio_display(void)
2594 {
2595     if (cur_stream) {
2596         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2597         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2598         fill_rectangle(screen,
2599                        cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2600                        bgcolor);
2601         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2602     }
2603 }
2604
2605 /* handle an event sent by the GUI */
2606 static void event_loop(void)
2607 {
2608     SDL_Event event;
2609     double incr, pos, frac;
2610
2611     for (;;) {
2612         double x;
2613         SDL_WaitEvent(&event);
2614         switch (event.type) {
2615         case SDL_KEYDOWN:
2616             if (exit_on_keydown) {
2617                 do_exit();
2618                 break;
2619             }
2620             switch (event.key.keysym.sym) {
2621             case SDLK_ESCAPE:
2622             case SDLK_q:
2623                 do_exit();
2624                 break;
2625             case SDLK_f:
2626                 toggle_full_screen();
2627                 break;
2628             case SDLK_p:
2629             case SDLK_SPACE:
2630                 toggle_pause();
2631                 break;
2632             case SDLK_s: // S: Step to next frame
2633                 step_to_next_frame();
2634                 break;
2635             case SDLK_a:
2636                 if (cur_stream)
2637                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2638                 break;
2639             case SDLK_v:
2640                 if (cur_stream)
2641                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2642                 break;
2643             case SDLK_t:
2644                 if (cur_stream)
2645                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2646                 break;
2647             case SDLK_w:
2648                 toggle_audio_display();
2649                 break;
2650             case SDLK_LEFT:
2651                 incr = -10.0;
2652                 goto do_seek;
2653             case SDLK_RIGHT:
2654                 incr = 10.0;
2655                 goto do_seek;
2656             case SDLK_UP:
2657                 incr = 60.0;
2658                 goto do_seek;
2659             case SDLK_DOWN:
2660                 incr = -60.0;
2661             do_seek:
2662                 if (cur_stream) {
2663                     if (seek_by_bytes) {
2664                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
2665                             pos = cur_stream->video_current_pos;
2666                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
2667                             pos = cur_stream->audio_pkt.pos;
2668                         } else
2669                             pos = avio_tell(cur_stream->ic->pb);
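                             /* convert the seek increment from seconds to bytes using
                              * the container bit rate, or assume roughly 180 kB/s
                              * (about 1.4 Mbit/s) when the bit rate is unknown */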
2670                         if (cur_stream->ic->bit_rate)
2671                             incr *= cur_stream->ic->bit_rate / 8.0;
2672                         else
2673                             incr *= 180000.0;
2674                         pos += incr;
2675                         stream_seek(cur_stream, pos, incr, 1);
2676                     } else {
2677                         pos = get_master_clock(cur_stream);
2678                         pos += incr;
2679                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2680                     }
2681                 }
2682                 break;
2683             default:
2684                 break;
2685             }
2686             break;
2687         case SDL_MOUSEBUTTONDOWN:
2688             if (exit_on_mousedown) {
2689                 do_exit();
2690                 break;
2691             }
2692         case SDL_MOUSEMOTION:
2693             if (event.type == SDL_MOUSEBUTTONDOWN) {
2694                 x = event.button.x;
2695             } else {
2696                 if (event.motion.state != SDL_PRESSED)
2697                     break;
2698                 x = event.motion.x;
2699             }
2700             if (cur_stream) {
2701                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
2702                     uint64_t size =  avio_size(cur_stream->ic->pb);
2703                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2704                 } else {
2705                     int64_t ts;
2706                     int ns, hh, mm, ss;
2707                     int tns, thh, tmm, tss;
2708                     tns  = cur_stream->ic->duration / 1000000LL;
2709                     thh  = tns / 3600;
2710                     tmm  = (tns % 3600) / 60;
2711                     tss  = (tns % 60);
2712                     frac = x / cur_stream->width;
2713                     ns   = frac * tns;
2714                     hh   = ns / 3600;
2715                     mm   = (ns % 3600) / 60;
2716                     ss   = (ns % 60);
2717                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2718                             hh, mm, ss, thh, tmm, tss);
2719                     ts = frac * cur_stream->ic->duration;
2720                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2721                         ts += cur_stream->ic->start_time;
2722                     stream_seek(cur_stream, ts, 0, 0);
2723                 }
2724             }
2725             break;
2726         case SDL_VIDEORESIZE:
2727             if (cur_stream) {
2728                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2729                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2730                 screen_width  = cur_stream->width  = event.resize.w;
2731                 screen_height = cur_stream->height = event.resize.h;
2732             }
2733             break;
2734         case SDL_QUIT:
2735         case FF_QUIT_EVENT:
2736             do_exit();
2737             break;
2738         case FF_ALLOC_EVENT:
2739             video_open(event.user.data1);
2740             alloc_picture(event.user.data1);
2741             break;
2742         case FF_REFRESH_EVENT:
2743             video_refresh_timer(event.user.data1);
2744             cur_stream->refresh = 0;
2745             break;
2746         default:
2747             break;
2748         }
2749     }
2750 }
2751
2752 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
2753 {
2754     av_log(NULL, AV_LOG_ERROR,
2755            "Option '%s' has been removed, use private format options instead\n", opt);
2756     return AVERROR(EINVAL);
2757 }
2758
2759 static int opt_width(void *optctx, const char *opt, const char *arg)
2760 {
2761     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2762     return 0;
2763 }
2764
2765 static int opt_height(void *optctx, const char *opt, const char *arg)
2766 {
2767     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2768     return 0;
2769 }
2770
2771 static int opt_format(void *optctx, const char *opt, const char *arg)
2772 {
2773     file_iformat = av_find_input_format(arg);
2774     if (!file_iformat) {
2775         fprintf(stderr, "Unknown input format: %s\n", arg);
2776         return AVERROR(EINVAL);
2777     }
2778     return 0;
2779 }
2780
2781 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
2782 {
2783     av_log(NULL, AV_LOG_ERROR,
2784            "Option '%s' has been removed, use private format options instead\n", opt);
2785     return AVERROR(EINVAL);
2786 }
2787
2788 static int opt_sync(void *optctx, const char *opt, const char *arg)
2789 {
2790     if (!strcmp(arg, "audio"))
2791         av_sync_type = AV_SYNC_AUDIO_MASTER;
2792     else if (!strcmp(arg, "video"))
2793         av_sync_type = AV_SYNC_VIDEO_MASTER;
2794     else if (!strcmp(arg, "ext"))
2795         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2796     else {
2797         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2798         exit(1);
2799     }
2800     return 0;
2801 }
2802
2803 static int opt_seek(void *optctx, const char *opt, const char *arg)
2804 {
2805     start_time = parse_time_or_die(opt, arg, 1);
2806     return 0;
2807 }
2808
2809 static int opt_duration(void *optctx, const char *opt, const char *arg)
2810 {
2811     duration = parse_time_or_die(opt, arg, 1);
2812     return 0;
2813 }
2814
2815 static int opt_vismv(void *optctx, const char *opt, const char *arg)
2816 {
2817     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2818     return 0;
2819 }
2820
2821 static const OptionDef options[] = {
2822 #include "cmdutils_common_opts.h"
2823     { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
2824     { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
2825     { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
2826     { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
2827     { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
2828     { "vn", OPT_BOOL, { &video_disable }, "disable video" },
2829     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
2830     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
2831     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { &wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
2832     { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
2833     { "t", HAS_ARG, { .func_arg = opt_duration }, "play  \"duration\" seconds of audio/video", "duration" },
2834     { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
2835     { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
2836     { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
2837     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
2838     { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
2839     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { &workaround_bugs }, "workaround bugs", "" },
2840     { "vismv", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vismv }, "visualize motion vectors", "" },
2841     { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
2842     { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
2843     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2844     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_loop_filter }, "", "" },
2845     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_frame }, "", "" },
2846     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { &skip_idct }, "", "" },
2847     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { &idct }, "set idct algo",  "algo" },
2848     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { &error_concealment }, "set error concealment options",  "bit_mask" },
2849     { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
2850     { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
2851     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
2852     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
2853     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
2854     { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
2855     { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
2856     { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
2857 #if CONFIG_AVFILTER
2858     { "vf", OPT_STRING | HAS_ARG, { &vfilters }, "video filters", "filter list" },
2859 #endif
2860     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
2861     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { opt_default }, "generic catch all option", "" },
2862     { "i", 0, { NULL }, "avconv compatibility dummy option", ""},
2863     { NULL, },
2864 };
2865
2866 static void show_usage(void)
2867 {
2868     printf("Simple media player\n");
2869     printf("usage: %s [options] input_file\n", program_name);
2870     printf("\n");
2871 }
2872
2873 void show_help_default(const char *opt, const char *arg)
2874 {
2875     av_log_set_callback(log_callback_help);
2876     show_usage();
2877     show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
2878     show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
2879     printf("\n");
2880     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2881     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
2882 #if !CONFIG_AVFILTER
2883     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
2884 #endif
2885     printf("\nWhile playing:\n"
2886            "q, ESC              quit\n"
2887            "f                   toggle full screen\n"
2888            "p, SPC              pause\n"
2889            "a                   cycle audio channel\n"
2890            "v                   cycle video channel\n"
2891            "t                   cycle subtitle channel\n"
2892            "w                   show audio waves\n"
2893            "s                   activate frame-step mode\n"
2894            "left/right          seek backward/forward 10 seconds\n"
2895            "down/up             seek backward/forward 1 minute\n"
2896            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2897            );
2898 }
2899
2900 static void opt_input_file(void *optctx, const char *filename)
2901 {
2902     if (input_filename) {
2903         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2904                 filename, input_filename);
2905         exit(1);
2906     }
2907     if (!strcmp(filename, "-"))
2908         filename = "pipe:";
2909     input_filename = filename;
2910 }
2911
2912 /* program entry point */
2913 int main(int argc, char **argv)
2914 {
2915     int flags;
2916
2917     av_log_set_flags(AV_LOG_SKIP_REPEATED);
2918     parse_loglevel(argc, argv, options);
2919
2920     /* register all codecs, demuxers and protocols */
2921     avcodec_register_all();
2922 #if CONFIG_AVDEVICE
2923     avdevice_register_all();
2924 #endif
2925 #if CONFIG_AVFILTER
2926     avfilter_register_all();
2927 #endif
2928     av_register_all();
2929     avformat_network_init();
2930
2931     init_opts();
2932
2933     show_banner();
2934
2935     parse_options(NULL, argc, argv, options, opt_input_file);
2936
2937     if (!input_filename) {
2938         show_usage();
2939         fprintf(stderr, "An input file must be specified\n");
2940         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
2941         exit(1);
2942     }
2943
2944     if (display_disable) {
2945         video_disable = 1;
2946     }
2947     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2948 #if !defined(__MINGW32__) && !defined(__APPLE__)
2949     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2950 #endif
2951     if (SDL_Init (flags)) {
2952         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2953         exit(1);
2954     }
2955
2956     if (!display_disable) {
2957         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2958         fs_screen_width = vi->current_w;
2959         fs_screen_height = vi->current_h;
2960     }
2961
2962     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2963     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2964     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2965
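     /* the flush packet points at itself, so queue consumers can recognize it by
      * comparing data pointers rather than contents */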
2966     av_init_packet(&flush_pkt);
2967     flush_pkt.data = (uint8_t *)&flush_pkt;
2968
2969     cur_stream = stream_open(input_filename, file_iformat);
2970
2971     event_loop();
2972
2973     /* never returns */
2974
2975     return 0;
2976 }