/*
 * Source provenance: OSDN Git Service mirror, repository coroid/libav_saccubus.git,
 * file ffplay.c. Page commit subject: "Replace some commented-out debug
 * printf() / av_log() messages with av_dlog()."
 */
1 /*
2  * ffplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavutil/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
/* Identification used by cmdutils.c for the banner / help output. */
const char program_name[] = "ffplay";
const int program_birth_year = 2003;

//#define DEBUG
//#define DEBUG_SYNC

/* Demuxer stops reading once the combined packet queues reach this many bytes. */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* Minimum number of bytes kept in the audio queue before playback is considered fed. */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* Minimum number of queued frames per stream before the demuxer may pause. */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* Weight used when deciding how aggressively to drop late video frames. */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* Default scaler quality for software conversion (overridable via AVOption). */
static int sws_flags = SWS_BICUBIC;
89
/* Thread-safe FIFO of demuxed packets, shared between the demuxer thread
 * (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head and tail of the singly linked list */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* accounted bytes: payload plus list-node overhead */
    int abort_request;  /* set by packet_queue_abort() to unblock/stop consumers */
    SDL_mutex *mutex;   /* protects every field above */
    SDL_cond *cond;     /* signaled when a packet is queued or abort is requested */
} PacketQueue;
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay carrying the pixel data
    int width, height; /* source height & width */
    int allocated;                               ///<nonzero once bmp is usable (set outside this chunk; see FF_ALLOC_EVENT handling)
    enum PixelFormat pix_fmt;                    ///<pixel format of the decoded frame

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtergraph buffer reference backing this picture
#endif
} VideoPicture;
115
/* One decoded subtitle entry queued for blending onto video frames. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; freed with avsubtitle_free() */
} SubPicture;
120
/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
126
/* Complete state of one open media file: demuxer, per-stream decoders,
 * queues, clocks and display parameters.  One instance per played file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demuxer (read) thread */
    SDL_Thread *video_tid;   /* video decoder thread */
    SDL_Thread *refresh_tid; /* thread pushing FF_REFRESH_EVENTs */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;       /* tells all threads to terminate */
    int paused;
    int last_paused;
    int seek_req;            /* a seek has been requested by the user */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;        /* index of the selected audio stream, or -1 */

    int av_sync_type;        /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding / A-V sync state --- */
    double audio_clock;      /* pts of the last decoded audio data */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;   /* size of the SDL audio hardware buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;      /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* sample-format converter, lazily created */

    /* --- audio visualization (waveform / RDFT spectrum) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                /* current x column of the spectrum display */

    /* --- subtitle stream state --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video stream state --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */

    PtsCorrectionContext pts_ctx;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;       /* frame-dropping ratio control */
    float skip_frames_index;
    int refresh;             /* a refresh event is pending */
} VideoState;
221
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;      /* fullscreen dimensions, 0 = autodetect */
static int fs_screen_height;
static int screen_width = 0;     /* windowed dimensions, 0 = derive from stream */
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested with -ast/-vst/-sst; -1 = automatic */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;     /* -1 = decide from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;             /* single-frame stepping mode */
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;         /* spectrum-display refresh period, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the start of the SDL audio callback */

/* special packet queued to signal a decoder flush after seeking */
static AVPacket flush_pkt;

/* custom SDL user events */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
289
290 /* packet queue handling */
/* packet queue handling */
/* Initialize a packet queue and prime it with the flush packet so the
 * consumer starts from a clean decoder state.
 * NOTE(review): SDL_CreateMutex()/SDL_CreateCond() results are not checked —
 * a failure here would crash later on lock/wait. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
298
299 static void packet_queue_flush(PacketQueue *q)
300 {
301     AVPacketList *pkt, *pkt1;
302
303     SDL_LockMutex(q->mutex);
304     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
305         pkt1 = pkt->next;
306         av_free_packet(&pkt->pkt);
307         av_freep(&pkt);
308     }
309     q->last_pkt = NULL;
310     q->first_pkt = NULL;
311     q->nb_packets = 0;
312     q->size = 0;
313     SDL_UnlockMutex(q->mutex);
314 }
315
/* Destroy a packet queue: drop all pending packets and release the
 * synchronization primitives.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
322
323 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
324 {
325     AVPacketList *pkt1;
326
327     /* duplicate the packet */
328     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
329         return -1;
330
331     pkt1 = av_malloc(sizeof(AVPacketList));
332     if (!pkt1)
333         return -1;
334     pkt1->pkt = *pkt;
335     pkt1->next = NULL;
336
337
338     SDL_LockMutex(q->mutex);
339
340     if (!q->last_pkt)
341
342         q->first_pkt = pkt1;
343     else
344         q->last_pkt->next = pkt1;
345     q->last_pkt = pkt1;
346     q->nb_packets++;
347     q->size += pkt1->pkt.size + sizeof(*pkt1);
348     /* XXX: should duplicate packet data in DV case */
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352     return 0;
353 }
354
/* Request queue shutdown: set the abort flag and wake any consumer
 * blocked in packet_queue_get() so it can observe the flag. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
365
366 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
367 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
368 {
369     AVPacketList *pkt1;
370     int ret;
371
372     SDL_LockMutex(q->mutex);
373
374     for(;;) {
375         if (q->abort_request) {
376             ret = -1;
377             break;
378         }
379
380         pkt1 = q->first_pkt;
381         if (pkt1) {
382             q->first_pkt = pkt1->next;
383             if (!q->first_pkt)
384                 q->last_pkt = NULL;
385             q->nb_packets--;
386             q->size -= pkt1->pkt.size + sizeof(*pkt1);
387             *pkt = pkt1->pkt;
388             av_free(pkt1);
389             ret = 1;
390             break;
391         } else if (!block) {
392             ret = 0;
393             break;
394         } else {
395             SDL_CondWait(q->cond, q->mutex);
396         }
397     }
398     SDL_UnlockMutex(q->mutex);
399     return ret;
400 }
401
402 static inline void fill_rectangle(SDL_Surface *screen,
403                                   int x, int y, int w, int h, int color)
404 {
405     SDL_Rect rect;
406     rect.x = x;
407     rect.y = y;
408     rect.w = w;
409     rect.h = h;
410     SDL_FillRect(screen, &rect, color);
411 }
412
#if 0
/* NOTE(review): dead code, compiled out with "#if 0"; kept only as a
 * reference for the commented-out fill_border() call in
 * video_image_display().  Consider deleting it outright. */
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    /* left, right, top and bottom bands around the inner rectangle */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
450
/* Blend newp over oldp with alpha a (0..255); s pre-scales oldp by 2^s so
 * accumulated sums of 2^s samples can be blended in one step. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB value from s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette index at s in pal and unpack the entry into
 * y, u, v, a.  The palette entries are expected to be in YCrCb already
 * (see the comment where pal is assigned in blend_subrect()). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a back into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

/* bytes per subtitle bitmap pixel: the bitmaps are palettized (1 byte/pixel) */
#define BPP 1
479
480 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
481 {
482     int wrap, wrap3, width2, skip2;
483     int y, u, v, a, u1, v1, a1, w, h;
484     uint8_t *lum, *cb, *cr;
485     const uint8_t *p;
486     const uint32_t *pal;
487     int dstx, dsty, dstw, dsth;
488
489     dstw = av_clip(rect->w, 0, imgw);
490     dsth = av_clip(rect->h, 0, imgh);
491     dstx = av_clip(rect->x, 0, imgw - dstw);
492     dsty = av_clip(rect->y, 0, imgh - dsth);
493     lum = dst->data[0] + dsty * dst->linesize[0];
494     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
495     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
496
497     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
498     skip2 = dstx >> 1;
499     wrap = dst->linesize[0];
500     wrap3 = rect->pict.linesize[0];
501     p = rect->pict.data[0];
502     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
503
504     if (dsty & 1) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
513             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
514             cb++;
515             cr++;
516             lum++;
517             p += BPP;
518         }
519         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 = u;
522             v1 = v;
523             a1 = a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525
526             YUVA_IN(y, u, v, a, p + BPP, pal);
527             u1 += u;
528             v1 += v;
529             a1 += a;
530             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
532             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
533             cb++;
534             cr++;
535             p += 2 * BPP;
536             lum += 2;
537         }
538         if (w) {
539             YUVA_IN(y, u, v, a, p, pal);
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
542             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
543             p++;
544             lum++;
545         }
546         p += wrap3 - dstw * BPP;
547         lum += wrap - dstw - dstx;
548         cb += dst->linesize[1] - width2 - skip2;
549         cr += dst->linesize[2] - width2 - skip2;
550     }
551     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
552         lum += dstx;
553         cb += skip2;
554         cr += skip2;
555
556         if (dstx & 1) {
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 = u;
559             v1 = v;
560             a1 = a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             p += wrap3;
563             lum += wrap;
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 += u;
566             v1 += v;
567             a1 += a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
571             cb++;
572             cr++;
573             p += -wrap3 + BPP;
574             lum += -wrap + 1;
575         }
576         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 = u;
579             v1 = v;
580             a1 = a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582
583             YUVA_IN(y, u, v, a, p + BPP, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
588             p += wrap3;
589             lum += wrap;
590
591             YUVA_IN(y, u, v, a, p, pal);
592             u1 += u;
593             v1 += v;
594             a1 += a;
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596
597             YUVA_IN(y, u, v, a, p + BPP, pal);
598             u1 += u;
599             v1 += v;
600             a1 += a;
601             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602
603             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
604             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
605
606             cb++;
607             cr++;
608             p += -wrap3 + 2 * BPP;
609             lum += -wrap + 2;
610         }
611         if (w) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617             p += wrap3;
618             lum += wrap;
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
626             cb++;
627             cr++;
628             p += -wrap3 + BPP;
629             lum += -wrap + 1;
630         }
631         p += wrap3 + (wrap3 - dstw * BPP);
632         lum += wrap + (wrap - dstw - dstx);
633         cb += dst->linesize[1] - width2 - skip2;
634         cr += dst->linesize[2] - width2 - skip2;
635     }
636     /* handle odd height */
637     if (h) {
638         lum += dstx;
639         cb += skip2;
640         cr += skip2;
641
642         if (dstx & 1) {
643             YUVA_IN(y, u, v, a, p, pal);
644             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
645             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
646             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
647             cb++;
648             cr++;
649             lum++;
650             p += BPP;
651         }
652         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
653             YUVA_IN(y, u, v, a, p, pal);
654             u1 = u;
655             v1 = v;
656             a1 = a;
657             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658
659             YUVA_IN(y, u, v, a, p + BPP, pal);
660             u1 += u;
661             v1 += v;
662             a1 += a;
663             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
664             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
665             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
666             cb++;
667             cr++;
668             p += 2 * BPP;
669             lum += 2;
670         }
671         if (w) {
672             YUVA_IN(y, u, v, a, p, pal);
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
675             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
676         }
677     }
678 }
679
/* Release the decoded subtitle data held by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
684
/* Display the picture at the read index of the picture queue: blend any
 * active subtitle into the overlay, compute a letterboxed destination
 * rectangle from the aspect ratio, and present the overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* pixel aspect ratio as reported by the filter output */
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert pixel aspect to display aspect */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's display window has started */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: the overlay stores V before
                       U (YV12 ordering) while AVPicture expects U then V */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, keeping aspect, even dimensions */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
772
/* Mathematical modulus: like a % b but always in [0, b) for b > 0.
 * C's % operator may return a negative remainder for negative a; fold
 * that case back into the positive range. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
781
782 static void video_audio_display(VideoState *s)
783 {
784     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
785     int ch, channels, h, h2, bgcolor, fgcolor;
786     int16_t time_diff;
787     int rdft_bits, nb_freq;
788
789     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
790         ;
791     nb_freq= 1<<(rdft_bits-1);
792
793     /* compute display index : center on currently output samples */
794     channels = s->audio_st->codec->channels;
795     nb_display_channels = channels;
796     if (!s->paused) {
797         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
798         n = 2 * channels;
799         delay = audio_write_get_buf_size(s);
800         delay /= n;
801
802         /* to be more precise, we take into account the time spent since
803            the last buffer computation */
804         if (audio_callback_time) {
805             time_diff = av_gettime() - audio_callback_time;
806             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
807         }
808
809         delay += 2*data_used;
810         if (delay < data_used)
811             delay = data_used;
812
813         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
814         if(s->show_audio==1){
815             h= INT_MIN;
816             for(i=0; i<1000; i+=channels){
817                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
818                 int a= s->sample_array[idx];
819                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
820                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
821                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
822                 int score= a-d;
823                 if(h<score && (b^c)<0){
824                     h= score;
825                     i_start= idx;
826                 }
827             }
828         }
829
830         s->last_i_start = i_start;
831     } else {
832         i_start = s->last_i_start;
833     }
834
835     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
836     if(s->show_audio==1){
837         fill_rectangle(screen,
838                        s->xleft, s->ytop, s->width, s->height,
839                        bgcolor);
840
841         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
842
843         /* total height for one channel */
844         h = s->height / nb_display_channels;
845         /* graph height / 2 */
846         h2 = (h * 9) / 20;
847         for(ch = 0;ch < nb_display_channels; ch++) {
848             i = i_start + ch;
849             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
850             for(x = 0; x < s->width; x++) {
851                 y = (s->sample_array[i] * h2) >> 15;
852                 if (y < 0) {
853                     y = -y;
854                     ys = y1 - y;
855                 } else {
856                     ys = y1;
857                 }
858                 fill_rectangle(screen,
859                                s->xleft + x, ys, 1, y,
860                                fgcolor);
861                 i += channels;
862                 if (i >= SAMPLE_ARRAY_SIZE)
863                     i -= SAMPLE_ARRAY_SIZE;
864             }
865         }
866
867         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
868
869         for(ch = 1;ch < nb_display_channels; ch++) {
870             y = s->ytop + ch * h;
871             fill_rectangle(screen,
872                            s->xleft, y, s->width, 1,
873                            fgcolor);
874         }
875         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
876     }else{
877         nb_display_channels= FFMIN(nb_display_channels, 2);
878         if(rdft_bits != s->rdft_bits){
879             av_rdft_end(s->rdft);
880             av_free(s->rdft_data);
881             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
882             s->rdft_bits= rdft_bits;
883             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
884         }
885         {
886             FFTSample *data[2];
887             for(ch = 0;ch < nb_display_channels; ch++) {
888                 data[ch] = s->rdft_data + 2*nb_freq*ch;
889                 i = i_start + ch;
890                 for(x = 0; x < 2*nb_freq; x++) {
891                     double w= (x-nb_freq)*(1.0/nb_freq);
892                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
893                     i += channels;
894                     if (i >= SAMPLE_ARRAY_SIZE)
895                         i -= SAMPLE_ARRAY_SIZE;
896                 }
897                 av_rdft_calc(s->rdft, data[ch]);
898             }
899             //least efficient way to do this, we should of course directly access it but its more than fast enough
900             for(y=0; y<s->height; y++){
901                 double w= 1/sqrt(nb_freq);
902                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
903                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
904                        + data[1][2*y+1]*data[1][2*y+1])) : a;
905                 a= FFMIN(a,255);
906                 b= FFMIN(b,255);
907                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
908
909                 fill_rectangle(screen,
910                             s->xpos, s->height-y, 1, 1,
911                             fgcolor);
912             }
913         }
914         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
915         s->xpos++;
916         if(s->xpos >= s->width)
917             s->xpos= s->xleft;
918     }
919 }
920
/* (Re)open the SDL video surface at an appropriate size.
 * Size priority: explicit fullscreen size, explicit window size, filter
 * output size (or codec size without avfilter), then 640x480.
 * Returns 0 on success (or when the existing surface already matches),
 * -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
970
971 /* display the current picture, if any */
972 static void video_display(VideoState *is)
973 {
974     if(!screen)
975         video_open(cur_stream);
976     if (is->audio_st && is->show_audio)
977         video_audio_display(is);
978     else if (is->video_st)
979         video_image_display(is);
980 }
981
982 static int refresh_thread(void *opaque)
983 {
984     VideoState *is= opaque;
985     while(!is->abort_request){
986         SDL_Event event;
987         event.type = FF_REFRESH_EVENT;
988         event.user.data1 = opaque;
989         if(!is->refresh){
990             is->refresh=1;
991             SDL_PushEvent(&event);
992         }
993         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
994     }
995     return 0;
996 }
997
998 /* get the current audio clock value */
999 static double get_audio_clock(VideoState *is)
1000 {
1001     double pts;
1002     int hw_buf_size, bytes_per_sec;
1003     pts = is->audio_clock;
1004     hw_buf_size = audio_write_get_buf_size(is);
1005     bytes_per_sec = 0;
1006     if (is->audio_st) {
1007         bytes_per_sec = is->audio_st->codec->sample_rate *
1008             2 * is->audio_st->codec->channels;
1009     }
1010     if (bytes_per_sec)
1011         pts -= (double)hw_buf_size / bytes_per_sec;
1012     return pts;
1013 }
1014
1015 /* get the current video clock value */
1016 static double get_video_clock(VideoState *is)
1017 {
1018     if (is->paused) {
1019         return is->video_current_pts;
1020     } else {
1021         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1022     }
1023 }
1024
1025 /* get the current external clock value */
1026 static double get_external_clock(VideoState *is)
1027 {
1028     int64_t ti;
1029     ti = av_gettime();
1030     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1031 }
1032
1033 /* get the current master clock value */
1034 static double get_master_clock(VideoState *is)
1035 {
1036     double val;
1037
1038     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1039         if (is->video_st)
1040             val = get_video_clock(is);
1041         else
1042             val = get_audio_clock(is);
1043     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1044         if (is->audio_st)
1045             val = get_audio_clock(is);
1046         else
1047             val = get_video_clock(is);
1048     } else {
1049         val = get_external_clock(is);
1050     }
1051     return val;
1052 }
1053
1054 /* seek in the stream */
1055 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1056 {
1057     if (!is->seek_req) {
1058         is->seek_pos = pos;
1059         is->seek_rel = rel;
1060         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1061         if (seek_by_bytes)
1062             is->seek_flags |= AVSEEK_FLAG_BYTE;
1063         is->seek_req = 1;
1064     }
1065 }
1066
/* Toggle pause/resume.  When resuming, the frame timer and the video
 * clock drift are re-based so the time spent paused does not count as
 * elapsed playback time.  (read_pause_return reflects whether the
 * demuxer itself supports av_read_pause(); ENOSYS means it does not.) */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* advance the frame timer by the wall time spent paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* demuxer really paused: recompute the current pts from drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the (possibly updated) current pts */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1079
1080 static double compute_target_time(double frame_current_pts, VideoState *is)
1081 {
1082     double delay, sync_threshold, diff;
1083
1084     /* compute nominal delay */
1085     delay = frame_current_pts - is->frame_last_pts;
1086     if (delay <= 0 || delay >= 10.0) {
1087         /* if incorrect delay, use previous one */
1088         delay = is->frame_last_delay;
1089     } else {
1090         is->frame_last_delay = delay;
1091     }
1092     is->frame_last_pts = frame_current_pts;
1093
1094     /* update delay to follow master synchronisation source */
1095     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1096          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1097         /* if video is slave, we try to correct big delays by
1098            duplicating or deleting a frame */
1099         diff = get_video_clock(is) - get_master_clock(is);
1100
1101         /* skip or repeat frame. We take into account the
1102            delay to compute the threshold. I still don't know
1103            if it is the best guess */
1104         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1105         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1106             if (diff <= -sync_threshold)
1107                 delay = 0;
1108             else if (diff >= sync_threshold)
1109                 delay = 2 * delay;
1110         }
1111     }
1112     is->frame_timer += delay;
1113 #if defined(DEBUG_SYNC)
1114     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1115             delay, actual_delay, frame_current_pts, -diff);
1116 #endif
1117
1118     return is->frame_timer;
1119 }
1120
/* Called from the main loop on each FF_REFRESH_EVENT: display the next
 * due picture (and any active subtitle), drop late frames when
 * -framedrop is enabled, and periodically print the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this frame: wait for a later refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* determine when the frame after this one is due, to decide
               whether this frame is already too late to display */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the skip ratio used by the decoder */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    /* frame dropped: try the next queued picture */
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the subtitle once its display window has
                           passed or the next subtitle has become due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* print the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1261
/* Stop all worker threads and free every resource owned by the
 * VideoState.  The abort flag must be raised before joining, as the
 * threads poll it to exit their loops. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    /* destroy the queue synchronisation primitives (threads are joined,
       so no one can still be waiting on them) */
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1295
/* Tear down the player (stream, options, filter system, SDL) and
 * terminate the process. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* terminate the status line / pending log output with a final flush */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1312
1313 /* allocate a picture (needs to do that in main thread to avoid
1314    potential locking problems */
1315 static void alloc_picture(void *opaque)
1316 {
1317     VideoState *is = opaque;
1318     VideoPicture *vp;
1319
1320     vp = &is->pictq[is->pictq_windex];
1321
1322     if (vp->bmp)
1323         SDL_FreeYUVOverlay(vp->bmp);
1324
1325 #if CONFIG_AVFILTER
1326     if (vp->picref)
1327         avfilter_unref_buffer(vp->picref);
1328     vp->picref = NULL;
1329
1330     vp->width   = is->out_video_filter->inputs[0]->w;
1331     vp->height  = is->out_video_filter->inputs[0]->h;
1332     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1333 #else
1334     vp->width   = is->video_st->codec->width;
1335     vp->height  = is->video_st->codec->height;
1336     vp->pix_fmt = is->video_st->codec->pix_fmt;
1337 #endif
1338
1339     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1340                                    SDL_YV12_OVERLAY,
1341                                    screen);
1342     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1343         /* SDL allocates a buffer smaller than requested if the video
1344          * overlay hardware is unable to support the requested size. */
1345         fprintf(stderr, "Error: the video system does not support an image\n"
1346                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1347                         "to reduce the image size.\n", vp->width, vp->height );
1348         do_exit();
1349     }
1350
1351     SDL_LockMutex(is->pictq_mutex);
1352     vp->allocated = 1;
1353     SDL_CondSignal(is->pictq_cond);
1354     SDL_UnlockMutex(is->pictq_mutex);
1355 }
1356
1357 /**
1358  *
1359  * @param pts the dts of the pkt / pts of the frame and guessed if not known
1360  */
1361 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1362 {
1363     VideoPicture *vp;
1364     int dst_pix_fmt;
1365 #if CONFIG_AVFILTER
1366     AVPicture pict_src;
1367 #endif
1368     /* wait until we have space to put a new picture */
1369     SDL_LockMutex(is->pictq_mutex);
1370
1371     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1372         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1373
1374     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1375            !is->videoq.abort_request) {
1376         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1377     }
1378     SDL_UnlockMutex(is->pictq_mutex);
1379
1380     if (is->videoq.abort_request)
1381         return -1;
1382
1383     vp = &is->pictq[is->pictq_windex];
1384
1385     /* alloc or resize hardware picture buffer */
1386     if (!vp->bmp ||
1387 #if CONFIG_AVFILTER
1388         vp->width  != is->out_video_filter->inputs[0]->w ||
1389         vp->height != is->out_video_filter->inputs[0]->h) {
1390 #else
1391         vp->width != is->video_st->codec->width ||
1392         vp->height != is->video_st->codec->height) {
1393 #endif
1394         SDL_Event event;
1395
1396         vp->allocated = 0;
1397
1398         /* the allocation must be done in the main thread to avoid
1399            locking problems */
1400         event.type = FF_ALLOC_EVENT;
1401         event.user.data1 = is;
1402         SDL_PushEvent(&event);
1403
1404         /* wait until the picture is allocated */
1405         SDL_LockMutex(is->pictq_mutex);
1406         while (!vp->allocated && !is->videoq.abort_request) {
1407             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408         }
1409         SDL_UnlockMutex(is->pictq_mutex);
1410
1411         if (is->videoq.abort_request)
1412             return -1;
1413     }
1414
1415     /* if the frame is not skipped, then display it */
1416     if (vp->bmp) {
1417         AVPicture pict;
1418 #if CONFIG_AVFILTER
1419         if(vp->picref)
1420             avfilter_unref_buffer(vp->picref);
1421         vp->picref = src_frame->opaque;
1422 #endif
1423
1424         /* get a pointer on the bitmap */
1425         SDL_LockYUVOverlay (vp->bmp);
1426
1427         dst_pix_fmt = PIX_FMT_YUV420P;
1428         memset(&pict,0,sizeof(AVPicture));
1429         pict.data[0] = vp->bmp->pixels[0];
1430         pict.data[1] = vp->bmp->pixels[2];
1431         pict.data[2] = vp->bmp->pixels[1];
1432
1433         pict.linesize[0] = vp->bmp->pitches[0];
1434         pict.linesize[1] = vp->bmp->pitches[2];
1435         pict.linesize[2] = vp->bmp->pitches[1];
1436
1437 #if CONFIG_AVFILTER
1438         pict_src.data[0] = src_frame->data[0];
1439         pict_src.data[1] = src_frame->data[1];
1440         pict_src.data[2] = src_frame->data[2];
1441
1442         pict_src.linesize[0] = src_frame->linesize[0];
1443         pict_src.linesize[1] = src_frame->linesize[1];
1444         pict_src.linesize[2] = src_frame->linesize[2];
1445
1446         //FIXME use direct rendering
1447         av_picture_copy(&pict, &pict_src,
1448                         vp->pix_fmt, vp->width, vp->height);
1449 #else
1450         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1451         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1452             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1453             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1454         if (is->img_convert_ctx == NULL) {
1455             fprintf(stderr, "Cannot initialize the conversion context\n");
1456             exit(1);
1457         }
1458         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1459                   0, vp->height, pict.data, pict.linesize);
1460 #endif
1461         /* update the bitmap content */
1462         SDL_UnlockYUVOverlay(vp->bmp);
1463
1464         vp->pts = pts;
1465         vp->pos = pos;
1466
1467         /* now we can update the picture count */
1468         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1469             is->pictq_windex = 0;
1470         SDL_LockMutex(is->pictq_mutex);
1471         vp->target_clock= compute_target_time(vp->pts, is);
1472
1473         is->pictq_size++;
1474         SDL_UnlockMutex(is->pictq_mutex);
1475     }
1476     return 0;
1477 }
1478
1479 /**
1480  * compute the exact PTS for the picture if it is omitted in the stream
1481  * @param pts1 the dts of the pkt / pts of the frame
1482  */
1483 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1484 {
1485     double frame_delay, pts;
1486
1487     pts = pts1;
1488
1489     if (pts != 0) {
1490         /* update video clock with pts, if present */
1491         is->video_clock = pts;
1492     } else {
1493         pts = is->video_clock;
1494     }
1495     /* update video clock for next frame */
1496     frame_delay = av_q2d(is->video_st->codec->time_base);
1497     /* for MPEG2, the frame can be repeated, so we update the
1498        clock accordingly */
1499     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1500     is->video_clock += frame_delay;
1501
1502 #if defined(DEBUG_SYNC) && 0
1503     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1504            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1505 #endif
1506     return queue_picture(is, src_frame, pts, pos);
1507 }
1508
/* Fetch the next packet from the video queue and decode it.
 * Returns 1 when a displayable frame is ready in *frame (with *pts set
 * in stream time_base units), 0 when no frame was produced (flush
 * packet, skipped frame, or no decoder output), and -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* a flush packet marks a seek: reset the decoder and timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the refresh side to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        init_pts_correction(&is->pts_ctx);
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* NOTE(review): len1 is never checked, so decode errors are silently
       treated the same as "no picture produced" */
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* pick the timestamp source according to decoder_reorder_pts
           (-1 = auto-guess, 1 = reordered pts, 0 = dts) */
        if (decoder_reorder_pts == -1) {
            *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame dropping: only report a frame when the skip counter
           crosses the current skip ratio */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1565
1566 #if CONFIG_AVFILTER
/* Private context of the "ffplay_input" source filter, which feeds
 * decoded video frames into the filter graph. */
typedef struct {
    VideoState *is;    /* owning player state (source of packets/frames) */
    AVFrame *frame;    /* scratch frame reused for every decode call */
    int use_dr1;       /* nonzero when direct rendering (DR1) is active */
} FilterPriv;
1572
/* AVCodecContext.get_buffer callback for direct rendering: allocate the
 * decoder's output buffer directly from the filter graph so decoded
 * frames need no extra copy.  Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad the requested size for alignment and the decoder's edge area */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the buffer ref reports the visible size, not the padded one */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* offset each plane pointer past the edge area (chroma planes are
           shifted by their subsampling factors) */
        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* keep the buffer ref reachable from the frame for later unref/reuse */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1621
/* AVCodecContext.release_buffer callback: clear the frame's plane
 * pointers (they alias the filter buffer) and drop our reference to the
 * AVFilterBufferRef stored in pic->opaque by input_get_buffer(). */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1627
/* AVCodecContext.reget_buffer callback: reuse the existing filter buffer
 * when the picture properties are unchanged; fall back to a fresh
 * (readable) get_buffer when the frame has no data yet.  Returns 0 on
 * success, negative on failure. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer attached yet: allocate one that the decoder may read */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    /* the old buffer cannot be reused if geometry or format changed */
    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1648
1649 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     AVCodecContext *codec;
1653     if(!opaque) return -1;
1654
1655     priv->is = opaque;
1656     codec    = priv->is->video_st->codec;
1657     codec->opaque = ctx;
1658     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1659         priv->use_dr1 = 1;
1660         codec->get_buffer     = input_get_buffer;
1661         codec->release_buffer = input_release_buffer;
1662         codec->reget_buffer   = input_reget_buffer;
1663         codec->thread_safe_callbacks = 1;
1664     }
1665
1666     priv->frame = avcodec_alloc_frame();
1667
1668     return 0;
1669 }
1670
/* uninit callback: free the scratch frame allocated by input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1676
/* request_frame callback of the source filter: decode until a frame is
 * not skipped, wrap it in a buffer reference, and push it down the
 * output link.  Returns 0 on success, -1 on decode failure or abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 for skipped/empty frames: keep going */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* with DR1 the frame data already lives in a filter buffer
           (stored in frame->opaque by input_get_buffer) */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* otherwise copy the decoded planes into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);
    /* NOTE(review): pkt.pos is read below after av_free_packet(); the
       field itself remains valid (only pkt.data is freed), but the
       ordering is fragile and worth confirming */

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* emit the frame as a single full-height slice */
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1709
/* query_formats callback: advertise the decoder's pixel format as the
 * only format this source filter can produce. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1720
/* config_props callback: configure the output link with the decoder's
 * dimensions and the video stream's time base. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1732
/* Definition of the source filter that injects ffplay's decoded video
 * frames into the filter graph: no inputs, one video output. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1751
1752 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1753 {
1754     char sws_flags_str[128];
1755     int ret;
1756     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1757     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1758     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1759     graph->scale_sws_opts = av_strdup(sws_flags_str);
1760
1761     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1762                                             NULL, is, graph)) < 0)
1763         goto the_end;
1764     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1765                                             NULL, &ffsink_ctx, graph)) < 0)
1766         goto the_end;
1767
1768     if(vfilters) {
1769         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1770         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1771
1772         outputs->name    = av_strdup("in");
1773         outputs->filter_ctx = filt_src;
1774         outputs->pad_idx = 0;
1775         outputs->next    = NULL;
1776
1777         inputs->name    = av_strdup("out");
1778         inputs->filter_ctx = filt_out;
1779         inputs->pad_idx = 0;
1780         inputs->next    = NULL;
1781
1782         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1783             goto the_end;
1784         av_freep(&vfilters);
1785     } else {
1786         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1787             goto the_end;
1788     }
1789
1790     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1791         goto the_end;
1792
1793     is->out_video_filter = filt_out;
1794 the_end:
1795     return ret;
1796 }
1797
1798 #endif  /* CONFIG_AVFILTER */
1799
/* Video decoding thread: pull frames (directly from the decoder, or
 * through the filter graph when CONFIG_AVFILTER), convert their
 * timestamps to seconds, and hand them to output_picture2() for
 * queueing.  Runs until aborted or a fatal error occurs. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* spin (cheaply) while paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        /* NOTE(review): when picref is NULL, pts_int/pos keep their values
           from the previous iteration — presumably ret prevents their use;
           worth confirming */
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        /* rescale from the filter's time base to the stream time base */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0 means the frame was skipped or no frame was produced */
        if (!ret)
            continue;

        /* timestamp in seconds for the display/sync machinery */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1874
1875 static int subtitle_thread(void *arg)
1876 {
1877     VideoState *is = arg;
1878     SubPicture *sp;
1879     AVPacket pkt1, *pkt = &pkt1;
1880     int len1, got_subtitle;
1881     double pts;
1882     int i, j;
1883     int r, g, b, y, u, v, a;
1884
1885     for(;;) {
1886         while (is->paused && !is->subtitleq.abort_request) {
1887             SDL_Delay(10);
1888         }
1889         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1890             break;
1891
1892         if(pkt->data == flush_pkt.data){
1893             avcodec_flush_buffers(is->subtitle_st->codec);
1894             continue;
1895         }
1896         SDL_LockMutex(is->subpq_mutex);
1897         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1898                !is->subtitleq.abort_request) {
1899             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1900         }
1901         SDL_UnlockMutex(is->subpq_mutex);
1902
1903         if (is->subtitleq.abort_request)
1904             goto the_end;
1905
1906         sp = &is->subpq[is->subpq_windex];
1907
1908        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1909            this packet, if any */
1910         pts = 0;
1911         if (pkt->pts != AV_NOPTS_VALUE)
1912             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1913
1914         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1915                                     &sp->sub, &got_subtitle,
1916                                     pkt);
1917 //            if (len1 < 0)
1918 //                break;
1919         if (got_subtitle && sp->sub.format == 0) {
1920             sp->pts = pts;
1921
1922             for (i = 0; i < sp->sub.num_rects; i++)
1923             {
1924                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1925                 {
1926                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1927                     y = RGB_TO_Y_CCIR(r, g, b);
1928                     u = RGB_TO_U_CCIR(r, g, b, 0);
1929                     v = RGB_TO_V_CCIR(r, g, b, 0);
1930                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1931                 }
1932             }
1933
1934             /* now we can update the picture count */
1935             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1936                 is->subpq_windex = 0;
1937             SDL_LockMutex(is->subpq_mutex);
1938             is->subpq_size++;
1939             SDL_UnlockMutex(is->subpq_mutex);
1940         }
1941         av_free_packet(pkt);
1942 //        if (step)
1943 //            if (cur_stream)
1944 //                stream_pause(cur_stream);
1945     }
1946  the_end:
1947     return 0;
1948 }
1949
1950 /* copy samples for viewing in editor window */
1951 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1952 {
1953     int size, len, channels;
1954
1955     channels = is->audio_st->codec->channels;
1956
1957     size = samples_size / sizeof(short);
1958     while (size > 0) {
1959         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1960         if (len > size)
1961             len = size;
1962         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1963         samples += len;
1964         is->sample_array_index += len;
1965         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1966             is->sample_array_index = 0;
1967         size -= len;
1968     }
1969 }
1970
1971 /* return the new audio buffer size (samples can be added or deleted
1972    to get better sync if video or external master clock) */
1973 static int synchronize_audio(VideoState *is, short *samples,
1974                              int samples_size1, double pts)
1975 {
1976     int n, samples_size;
1977     double ref_clock;
1978
1979     n = 2 * is->audio_st->codec->channels;
1980     samples_size = samples_size1;
1981
1982     /* if not master, then we try to remove or add samples to correct the clock */
1983     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1984          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1985         double diff, avg_diff;
1986         int wanted_size, min_size, max_size, nb_samples;
1987
1988         ref_clock = get_master_clock(is);
1989         diff = get_audio_clock(is) - ref_clock;
1990
1991         if (diff < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measures to have a correct estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2002                     nb_samples = samples_size / n;
2003
2004                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2005                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2006                     if (wanted_size < min_size)
2007                         wanted_size = min_size;
2008                     else if (wanted_size > max_size)
2009                         wanted_size = max_size;
2010
2011                     /* add or remove samples to correction the synchro */
2012                     if (wanted_size < samples_size) {
2013                         /* remove samples */
2014                         samples_size = wanted_size;
2015                     } else if (wanted_size > samples_size) {
2016                         uint8_t *samples_end, *q;
2017                         int nb;
2018
2019                         /* add samples */
2020                         nb = (samples_size - wanted_size);
2021                         samples_end = (uint8_t *)samples + samples_size - n;
2022                         q = samples_end + n;
2023                         while (nb > 0) {
2024                             memcpy(q, samples_end, n);
2025                             q += n;
2026                             nb -= n;
2027                         }
2028                         samples_size = wanted_size;
2029                     }
2030                 }
2031                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2032                         diff, avg_diff, samples_size - samples_size1,
2033                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
2034             }
2035         } else {
2036             /* too big difference : may be initial PTS errors, so
2037                reset A-V filter */
2038             is->audio_diff_avg_count = 0;
2039             is->audio_diff_cum = 0;
2040         }
2041     }
2042
2043     return samples_size;
2044 }
2045
/* decode one audio frame and returns its uncompressed size.
   Sets *pts_ptr to the pts (in seconds) of the returned samples and
   leaves the decoded S16 data in is->audio_buf. Returns -1 when
   paused or aborting. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance the cursor past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's sample format
               differs from the S16 format expected downstream */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (sent after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2148
2149 /* get the current audio output buffer size, in samples. With SDL, we
2150    cannot have a precise information */
2151 static int audio_write_get_buf_size(VideoState *is)
2152 {
2153     return is->audio_buf_size - is->audio_buf_index;
2154 }
2155
2156
2157 /* prepare a new audio buffer */
2158 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2159 {
2160     VideoState *is = opaque;
2161     int audio_size, len1;
2162     double pts;
2163
2164     audio_callback_time = av_gettime();
2165
2166     while (len > 0) {
2167         if (is->audio_buf_index >= is->audio_buf_size) {
2168            audio_size = audio_decode_frame(is, &pts);
2169            if (audio_size < 0) {
2170                 /* if error, just output silence */
2171                is->audio_buf = is->audio_buf1;
2172                is->audio_buf_size = 1024;
2173                memset(is->audio_buf, 0, is->audio_buf_size);
2174            } else {
2175                if (is->show_audio)
2176                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2177                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2178                                               pts);
2179                is->audio_buf_size = audio_size;
2180            }
2181            is->audio_buf_index = 0;
2182         }
2183         len1 = is->audio_buf_size - is->audio_buf_index;
2184         if (len1 > len)
2185             len1 = len;
2186         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2187         len -= len1;
2188         stream += len1;
2189         is->audio_buf_index += len1;
2190     }
2191 }
2192
/* open a given stream. Return 0 if OK.
   Finds and opens the decoder for ic->streams[stream_index], applies the
   command-line decoding options, opens SDL audio output for audio
   streams, and starts the matching decoder thread / packet queue. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for at most 2 channels (stereo downmix) */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* apply the global (command-line driven) decoder settings */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        /* SDL is fed native-endian S16 samples */
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* stop discarding packets of this stream and start its pipeline */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2292
/* undo stream_component_open(): abort the stream's packet queue, stop
   its decoder thread (or SDL audio), release the converter/codec and
   clear the per-stream state in `is`. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* abort queue operations first, then stop SDL so the audio
           callback no longer runs before the queue is destroyed */
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* resume discarding packets of this stream and reset the state */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2364
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* callback polled by libavformat during blocking I/O: returning
   non-zero aborts the current operation, making quit/seek responsive */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2373
/* this thread gets the stream from the disk or the network.
   It opens the input, selects and opens the best audio/video/subtitle
   streams, then runs the demuxing loop: read packets and dispatch them
   to the per-stream queues, handling pause, seek, looping and EOF.
   On fatal error it pushes an FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];   /* chosen stream per media type, -1 = none */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* install the abort callback so blocking I/O can be interrupted */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick the best stream of each type, honoring the user's
       wanted_stream overrides; all others stay discarded */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (e.g. network protocols) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush all queues and inject flush packets so the
                   decoders drop their buffered state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed an empty packet so the video decoder can flush its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* once all queues are drained: loop again, auto-exit, or idle */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* on error, ask the main event loop to quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2624
2625 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2626 {
2627     VideoState *is;
2628
2629     is = av_mallocz(sizeof(VideoState));
2630     if (!is)
2631         return NULL;
2632     av_strlcpy(is->filename, filename, sizeof(is->filename));
2633     is->iformat = iformat;
2634     is->ytop = 0;
2635     is->xleft = 0;
2636
2637     /* start video display */
2638     is->pictq_mutex = SDL_CreateMutex();
2639     is->pictq_cond = SDL_CreateCond();
2640
2641     is->subpq_mutex = SDL_CreateMutex();
2642     is->subpq_cond = SDL_CreateCond();
2643
2644     is->av_sync_type = av_sync_type;
2645     is->parse_tid = SDL_CreateThread(decode_thread, is);
2646     if (!is->parse_tid) {
2647         av_free(is);
2648         return NULL;
2649     }
2650     return is;
2651 }
2652
/* cycle to the next usable stream of the given type, wrapping around;
   for subtitles, wrapping past the last stream selects "none" (-1). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2701
2702
2703 static void toggle_full_screen(void)
2704 {
2705     is_full_screen = !is_full_screen;
2706     if (!fs_screen_width) {
2707         /* use default SDL method */
2708 //        SDL_WM_ToggleFullScreen(screen);
2709     }
2710     video_open(cur_stream);
2711 }
2712
2713 static void toggle_pause(void)
2714 {
2715     if (cur_stream)
2716         stream_pause(cur_stream);
2717     step = 0;
2718 }
2719
2720 static void step_to_next_frame(void)
2721 {
2722     if (cur_stream) {
2723         /* if the stream is paused unpause it, then step */
2724         if (cur_stream->paused)
2725             stream_pause(cur_stream);
2726     }
2727     step = 1;
2728 }
2729
2730 static void toggle_audio_display(void)
2731 {
2732     if (cur_stream) {
2733         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2734         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2735         fill_rectangle(screen,
2736                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2737                     bgcolor);
2738         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2739     }
2740 }
2741
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * custom FF_* user events.  Never returns normally; playback ends
 * through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys seek relative to the current position; the
               increment is in seconds and shared via the do_seek label */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte seeking: convert the time increment into a
                           byte increment using the stream bitrate (or a
                           fallback of 180 kB/s when it is unknown) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time seeking relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click seeks just like a pressed-button drag */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal click position to a fraction of the
                   file (by bytes when duration is unknown, by time else) */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            /* re-create the SDL surface at the new window size */
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* picture allocation requested by the video thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* display refresh requested by the refresh timer */
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2888
2889 static void opt_frame_size(const char *arg)
2890 {
2891     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2892         fprintf(stderr, "Incorrect frame size\n");
2893         exit(1);
2894     }
2895     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2896         fprintf(stderr, "Frame size must be a multiple of 2\n");
2897         exit(1);
2898     }
2899 }
2900
/* Parse the -x option: force the displayed width in pixels.
 * parse_number_or_die() validates the range [1, INT_MAX] and exits on error. */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2906
/* Parse the -y option: force the displayed height in pixels.
 * parse_number_or_die() validates the range [1, INT_MAX] and exits on error. */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2912
2913 static void opt_format(const char *arg)
2914 {
2915     file_iformat = av_find_input_format(arg);
2916     if (!file_iformat) {
2917         fprintf(stderr, "Unknown input format: %s\n", arg);
2918         exit(1);
2919     }
2920 }
2921
2922 static void opt_frame_pix_fmt(const char *arg)
2923 {
2924     frame_pix_fmt = av_get_pix_fmt(arg);
2925 }
2926
2927 static int opt_sync(const char *opt, const char *arg)
2928 {
2929     if (!strcmp(arg, "audio"))
2930         av_sync_type = AV_SYNC_AUDIO_MASTER;
2931     else if (!strcmp(arg, "video"))
2932         av_sync_type = AV_SYNC_VIDEO_MASTER;
2933     else if (!strcmp(arg, "ext"))
2934         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2935     else {
2936         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2937         exit(1);
2938     }
2939     return 0;
2940 }
2941
/* Parse the -ss option: playback start position.
 * parse_time_or_die() exits on invalid input. */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2947
/* Parse the -t option: maximum amount of audio/video to play.
 * parse_time_or_die() exits on invalid input. */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
2953
/* Parse the -debug option: store the debug flag value and raise the
 * libav log level to maximum verbosity. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);  /* maximum verbosity */
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2960
/* Parse the -vismv option: motion-vector visualization value
 * (full int range accepted; exits on invalid input). */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2966
/* Parse the -threads option: number of decoding threads.
 * Warns when the binary was built without thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2975
/* Command-line option table.  Generic options shared by all the tools come
 * from cmdutils_common_opts.h; OPT_FUNC2 entries dispatch to the opt_*()
 * handlers above, the remaining entries write directly into globals.
 * The order here is the order shown by -h. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3022
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3029
3030 static void show_help(void)
3031 {
3032     av_log_set_callback(log_callback_help);
3033     show_usage();
3034     show_help_options(options, "Main options:\n",
3035                       OPT_EXPERT, 0);
3036     show_help_options(options, "\nAdvanced options:\n",
3037                       OPT_EXPERT, OPT_EXPERT);
3038     printf("\n");
3039     av_opt_show2(avcodec_opts[0], NULL,
3040                  AV_OPT_FLAG_DECODING_PARAM, 0);
3041     printf("\n");
3042     av_opt_show2(avformat_opts, NULL,
3043                  AV_OPT_FLAG_DECODING_PARAM, 0);
3044 #if !CONFIG_AVFILTER
3045     printf("\n");
3046     av_opt_show2(sws_opts, NULL,
3047                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3048 #endif
3049     printf("\nWhile playing:\n"
3050            "q, ESC              quit\n"
3051            "f                   toggle full screen\n"
3052            "p, SPC              pause\n"
3053            "a                   cycle audio channel\n"
3054            "v                   cycle video channel\n"
3055            "t                   cycle subtitle channel\n"
3056            "w                   show audio waves\n"
3057            "s                   activate frame-step mode\n"
3058            "left/right          seek backward/forward 10 seconds\n"
3059            "down/up             seek backward/forward 1 minute\n"
3060            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3061            );
3062 }
3063
3064 static void opt_input_file(const char *filename)
3065 {
3066     if (input_filename) {
3067         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3068                 filename, input_filename);
3069         exit(1);
3070     }
3071     if (!strcmp(filename, "-"))
3072         filename = "pipe:";
3073     input_filename = filename;
3074 }
3075
/* Called from the main */
/* Program entry point: register libav components, parse the command line,
 * initialize SDL, open the input stream and run the event loop. */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the current desktop resolution for fullscreen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* event types ffplay does not act on; ignoring them keeps them
       out of the event queue */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet used to signal queue flushes after a seek;
       NOTE(review): the "FLUSH" bytes appear to serve only as a marker —
       confirm against the packet queue code */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}