1 /*
2  * FFplay : Simple Media Player based on the Libav libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of Libav.
6  *
7  * Libav is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * Libav is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with Libav; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #define _XOPEN_SOURCE 600
23
24 #include "config.h"
25 #include <inttypes.h>
26 #include <math.h>
27 #include <limits.h>
28 #include "libavutil/avstring.h"
29 #include "libavutil/colorspace.h"
30 #include "libavutil/pixdesc.h"
31 #include "libavutil/imgutils.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavformat/avformat.h"
35 #include "libavdevice/avdevice.h"
36 #include "libswscale/swscale.h"
37 #include "libavcodec/audioconvert.h"
38 #include "libavcodec/opt.h"
39 #include "libavcodec/avfft.h"
40
41 #if CONFIG_AVFILTER
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 #endif
45
46 #include "cmdutils.h"
47
48 #include <SDL.h>
49 #include <SDL_thread.h>
50
51 #ifdef __MINGW32__
52 #undef main /* We don't want SDL to override our main() */
53 #endif
54
55 #include <unistd.h>
56 #include <assert.h>
57
58 const char program_name[] = "FFplay";
59 const int program_birth_year = 2003;
60
61 //#define DEBUG
62 //#define DEBUG_SYNC
63
64 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66 #define MIN_FRAMES 5
67
68 /* SDL audio buffer size, in samples. Should be small to have precise
69    A/V sync as SDL does not have hardware buffer fullness info. */
70 #define SDL_AUDIO_BUFFER_SIZE 1024
71
72 /* no AV sync correction is done if below the AV sync threshold */
73 #define AV_SYNC_THRESHOLD 0.01
74 /* no AV correction is done if too big error */
75 #define AV_NOSYNC_THRESHOLD 10.0
76
77 #define FRAME_SKIP_FACTOR 0.05
78
79 /* maximum audio speed change to get correct sync */
80 #define SAMPLE_CORRECTION_PERCENT_MAX 10
81
82 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83 #define AUDIO_DIFF_AVG_NB   20
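/*
 * How the constants above fit together: the difference between the audio
 * clock and the master clock is averaged over roughly AUDIO_DIFF_AVG_NB
 * measurements, and when the averaged drift exceeds the per-stream
 * threshold the number of samples handed to the audio device is stretched
 * or shrunk, but never by more than SAMPLE_CORRECTION_PERCENT_MAX percent
 * at a time, so the correction stays close to inaudible.
 */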
84
85 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86 #define SAMPLE_ARRAY_SIZE (2*65536)
87
88 static int sws_flags = SWS_BICUBIC;
89
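/*
 * A PacketQueue is a mutex/condition protected singly linked list of
 * AVPacketList nodes, filled by the demuxer thread and drained by the
 * decoder threads.  'size' counts the packet payloads plus the list node
 * overhead, and 'abort_request' wakes up any blocked reader so the
 * threads can exit cleanly (see packet_queue_get/abort below).
 */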
90 typedef struct PacketQueue {
91     AVPacketList *first_pkt, *last_pkt;
92     int nb_packets;
93     int size;
94     int abort_request;
95     SDL_mutex *mutex;
96     SDL_cond *cond;
97 } PacketQueue;
98
99 #define VIDEO_PICTURE_QUEUE_SIZE 2
100 #define SUBPICTURE_QUEUE_SIZE 4
101
102 typedef struct VideoPicture {
103     double pts;                                  ///<presentation time stamp for this picture
104     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
105     int64_t pos;                                 ///<byte position in file
106     SDL_Overlay *bmp;
107     int width, height; /* source height & width */
108     int allocated;
109     enum PixelFormat pix_fmt;
110
111 #if CONFIG_AVFILTER
112     AVFilterBufferRef *picref;
113 #endif
114 } VideoPicture;
115
116 typedef struct SubPicture {
117     double pts; /* presentation time stamp for this picture */
118     AVSubtitle sub;
119 } SubPicture;
120
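/* Master clock selection: by default video is slaved to the audio clock;
   get_master_clock() falls back to whichever stream is present, or to the
   external (system time based) clock for AV_SYNC_EXTERNAL_CLOCK. */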
121 enum {
122     AV_SYNC_AUDIO_MASTER, /* default choice */
123     AV_SYNC_VIDEO_MASTER,
124     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125 };
126
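/*
 * VideoState is the single per-file playback context: it owns the parse
 * (demuxer), video decoder and refresh threads, one PacketQueue per
 * stream, the decoded picture and subtitle queues, and all clock state.
 * A pointer to it is passed as the opaque argument to the worker threads.
 */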
127 typedef struct VideoState {
128     SDL_Thread *parse_tid;
129     SDL_Thread *video_tid;
130     SDL_Thread *refresh_tid;
131     AVInputFormat *iformat;
132     int no_background;
133     int abort_request;
134     int paused;
135     int last_paused;
136     int seek_req;
137     int seek_flags;
138     int64_t seek_pos;
139     int64_t seek_rel;
140     int read_pause_return;
141     AVFormatContext *ic;
142     int dtg_active_format;
143
144     int audio_stream;
145
146     int av_sync_type;
147     double external_clock; /* external clock base */
148     int64_t external_clock_time;
149
150     double audio_clock;
151     double audio_diff_cum; /* used for AV difference average computation */
152     double audio_diff_avg_coef;
153     double audio_diff_threshold;
154     int audio_diff_avg_count;
155     AVStream *audio_st;
156     PacketQueue audioq;
157     int audio_hw_buf_size;
158     /* samples output by the codec. We reserve more space for A/V sync
159        compensation. */
160     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162     uint8_t *audio_buf;
163     unsigned int audio_buf_size; /* in bytes */
164     int audio_buf_index; /* in bytes */
165     AVPacket audio_pkt_temp;
166     AVPacket audio_pkt;
167     enum AVSampleFormat audio_src_fmt;
168     AVAudioConvert *reformat_ctx;
169
170     int show_audio; /* if true, display audio samples */
171     int16_t sample_array[SAMPLE_ARRAY_SIZE];
172     int sample_array_index;
173     int last_i_start;
174     RDFTContext *rdft;
175     int rdft_bits;
176     FFTSample *rdft_data;
177     int xpos;
178
179     SDL_Thread *subtitle_tid;
180     int subtitle_stream;
181     int subtitle_stream_changed;
182     AVStream *subtitle_st;
183     PacketQueue subtitleq;
184     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
185     int subpq_size, subpq_rindex, subpq_windex;
186     SDL_mutex *subpq_mutex;
187     SDL_cond *subpq_cond;
188
189     double frame_timer;
190     double frame_last_pts;
191     double frame_last_delay;
192     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
193     int video_stream;
194     AVStream *video_st;
195     PacketQueue videoq;
196     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
197     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
198     int64_t video_current_pos;                   ///<current displayed file pos
199     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
200     int pictq_size, pictq_rindex, pictq_windex;
201     SDL_mutex *pictq_mutex;
202     SDL_cond *pictq_cond;
203 #if !CONFIG_AVFILTER
204     struct SwsContext *img_convert_ctx;
205 #endif
206
207     //    QETimer *video_timer;
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211     PtsCorrectionContext pts_ctx;
212
213 #if CONFIG_AVFILTER
214     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
215 #endif
216
217     float skip_frames;
218     float skip_frames_index;
219     int refresh;
220 } VideoState;
221
222 static void show_help(void);
223 static int audio_write_get_buf_size(VideoState *is);
224
225 /* options specified by the user */
226 static AVInputFormat *file_iformat;
227 static const char *input_filename;
228 static const char *window_title;
229 static int fs_screen_width;
230 static int fs_screen_height;
231 static int screen_width = 0;
232 static int screen_height = 0;
233 static int frame_width = 0;
234 static int frame_height = 0;
235 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
236 static int audio_disable;
237 static int video_disable;
238 static int wanted_stream[AVMEDIA_TYPE_NB]={
239     [AVMEDIA_TYPE_AUDIO]=-1,
240     [AVMEDIA_TYPE_VIDEO]=-1,
241     [AVMEDIA_TYPE_SUBTITLE]=-1,
242 };
243 static int seek_by_bytes=-1;
244 static int display_disable;
245 static int show_status = 1;
246 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
247 static int64_t start_time = AV_NOPTS_VALUE;
248 static int64_t duration = AV_NOPTS_VALUE;
249 static int debug = 0;
250 static int debug_mv = 0;
251 static int step = 0;
252 static int thread_count = 1;
253 static int workaround_bugs = 1;
254 static int fast = 0;
255 static int genpts = 0;
256 static int lowres = 0;
257 static int idct = FF_IDCT_AUTO;
258 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
259 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
260 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
261 static int error_recognition = FF_ER_CAREFUL;
262 static int error_concealment = 3;
263 static int decoder_reorder_pts= -1;
264 static int autoexit;
265 static int exit_on_keydown;
266 static int exit_on_mousedown;
267 static int loop=1;
268 static int framedrop=1;
269
270 static int rdftspeed=20;
271 #if CONFIG_AVFILTER
272 static char *vfilters = NULL;
273 #endif
274
275 /* current context */
276 static int is_full_screen;
277 static VideoState *cur_stream;
278 static int64_t audio_callback_time;
279
280 static AVPacket flush_pkt;
281
282 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
283 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
284 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
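/*
 * Custom SDL user events: FF_ALLOC_EVENT asks the main thread to
 * (re)allocate a YUV overlay (see alloc_picture(), which must run in the
 * main thread), FF_REFRESH_EVENT is posted by refresh_thread() to trigger
 * video_refresh_timer(), and FF_QUIT_EVENT signals the event loop to exit.
 */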
285
286 static SDL_Surface *screen;
287
288 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
289
290 /* packet queue handling */
291 static void packet_queue_init(PacketQueue *q)
292 {
293     memset(q, 0, sizeof(PacketQueue));
294     q->mutex = SDL_CreateMutex();
295     q->cond = SDL_CreateCond();
296     packet_queue_put(q, &flush_pkt);
297 }
298
299 static void packet_queue_flush(PacketQueue *q)
300 {
301     AVPacketList *pkt, *pkt1;
302
303     SDL_LockMutex(q->mutex);
304     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
305         pkt1 = pkt->next;
306         av_free_packet(&pkt->pkt);
307         av_freep(&pkt);
308     }
309     q->last_pkt = NULL;
310     q->first_pkt = NULL;
311     q->nb_packets = 0;
312     q->size = 0;
313     SDL_UnlockMutex(q->mutex);
314 }
315
316 static void packet_queue_end(PacketQueue *q)
317 {
318     packet_queue_flush(q);
319     SDL_DestroyMutex(q->mutex);
320     SDL_DestroyCond(q->cond);
321 }
322
323 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
324 {
325     AVPacketList *pkt1;
326
327     /* duplicate the packet */
328     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
329         return -1;
330
331     pkt1 = av_malloc(sizeof(AVPacketList));
332     if (!pkt1)
333         return -1;
334     pkt1->pkt = *pkt;
335     pkt1->next = NULL;
336
337
338     SDL_LockMutex(q->mutex);
339
340     if (!q->last_pkt)
341
342         q->first_pkt = pkt1;
343     else
344         q->last_pkt->next = pkt1;
345     q->last_pkt = pkt1;
346     q->nb_packets++;
347     q->size += pkt1->pkt.size + sizeof(*pkt1);
348     /* XXX: should duplicate packet data in DV case */
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352     return 0;
353 }
354
355 static void packet_queue_abort(PacketQueue *q)
356 {
357     SDL_LockMutex(q->mutex);
358
359     q->abort_request = 1;
360
361     SDL_CondSignal(q->cond);
362
363     SDL_UnlockMutex(q->mutex);
364 }
365
366 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
367 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
368 {
369     AVPacketList *pkt1;
370     int ret;
371
372     SDL_LockMutex(q->mutex);
373
374     for(;;) {
375         if (q->abort_request) {
376             ret = -1;
377             break;
378         }
379
380         pkt1 = q->first_pkt;
381         if (pkt1) {
382             q->first_pkt = pkt1->next;
383             if (!q->first_pkt)
384                 q->last_pkt = NULL;
385             q->nb_packets--;
386             q->size -= pkt1->pkt.size + sizeof(*pkt1);
387             *pkt = pkt1->pkt;
388             av_free(pkt1);
389             ret = 1;
390             break;
391         } else if (!block) {
392             ret = 0;
393             break;
394         } else {
395             SDL_CondWait(q->cond, q->mutex);
396         }
397     }
398     SDL_UnlockMutex(q->mutex);
399     return ret;
400 }
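/*
 * Illustrative use of the queue API above (a sketch, not part of ffplay):
 * a decoder thread typically blocks until a packet or an abort arrives:
 *
 *     AVPacket pkt;
 *     for (;;) {
 *         if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
 *             break;                        // queue aborted, stop decoding
 *         if (pkt.data == flush_pkt.data) { // a seek happened
 *             avcodec_flush_buffers(is->video_st->codec);
 *             continue;
 *         }
 *         // ... decode pkt ...
 *         av_free_packet(&pkt);
 *     }
 *
 * This is the pattern followed by get_video_frame() further down.
 */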
401
402 static inline void fill_rectangle(SDL_Surface *screen,
403                                   int x, int y, int w, int h, int color)
404 {
405     SDL_Rect rect;
406     rect.x = x;
407     rect.y = y;
408     rect.w = w;
409     rect.h = h;
410     SDL_FillRect(screen, &rect, color);
411 }
412
413 #if 0
414 /* draw only the border of a rectangle */
415 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
416 {
417     int w1, w2, h1, h2;
418
419     /* fill the background */
420     w1 = x;
421     if (w1 < 0)
422         w1 = 0;
423     w2 = s->width - (x + w);
424     if (w2 < 0)
425         w2 = 0;
426     h1 = y;
427     if (h1 < 0)
428         h1 = 0;
429     h2 = s->height - (y + h);
430     if (h2 < 0)
431         h2 = 0;
432     fill_rectangle(screen,
433                    s->xleft, s->ytop,
434                    w1, s->height,
435                    color);
436     fill_rectangle(screen,
437                    s->xleft + s->width - w2, s->ytop,
438                    w2, s->height,
439                    color);
440     fill_rectangle(screen,
441                    s->xleft + w1, s->ytop,
442                    s->width - w1 - w2, h1,
443                    color);
444     fill_rectangle(screen,
445                    s->xleft + w1, s->ytop + s->height - h2,
446                    s->width - w1 - w2, h2,
447                    color);
448 }
449 #endif
450
451 #define ALPHA_BLEND(a, oldp, newp, s)\
452 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
453
454 #define RGBA_IN(r, g, b, a, s)\
455 {\
456     unsigned int v = ((const uint32_t *)(s))[0];\
457     a = (v >> 24) & 0xff;\
458     r = (v >> 16) & 0xff;\
459     g = (v >> 8) & 0xff;\
460     b = v & 0xff;\
461 }
462
463 #define YUVA_IN(y, u, v, a, s, pal)\
464 {\
465     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
466     a = (val >> 24) & 0xff;\
467     y = (val >> 16) & 0xff;\
468     u = (val >> 8) & 0xff;\
469     v = val & 0xff;\
470 }
471
472 #define YUVA_OUT(d, y, u, v, a)\
473 {\
474     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
475 }
476
477
478 #define BPP 1
479
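/*
 * blend_subrect() alpha-blends a palettized subtitle rectangle into a
 * YUV420P destination picture.  Luma is blended per pixel; the 4:2:0
 * chroma planes have half the resolution, so 1, 2 or 4 neighbouring
 * samples are accumulated in u1/v1/a1 and the shift argument of
 * ALPHA_BLEND() (0, 1 or 2) divides the accumulated values back down
 * while blending.
 */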
480 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
481 {
482     int wrap, wrap3, width2, skip2;
483     int y, u, v, a, u1, v1, a1, w, h;
484     uint8_t *lum, *cb, *cr;
485     const uint8_t *p;
486     const uint32_t *pal;
487     int dstx, dsty, dstw, dsth;
488
489     dstw = av_clip(rect->w, 0, imgw);
490     dsth = av_clip(rect->h, 0, imgh);
491     dstx = av_clip(rect->x, 0, imgw - dstw);
492     dsty = av_clip(rect->y, 0, imgh - dsth);
493     lum = dst->data[0] + dsty * dst->linesize[0];
494     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
495     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
496
497     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
498     skip2 = dstx >> 1;
499     wrap = dst->linesize[0];
500     wrap3 = rect->pict.linesize[0];
501     p = rect->pict.data[0];
502     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
503
504     if (dsty & 1) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
513             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
514             cb++;
515             cr++;
516             lum++;
517             p += BPP;
518         }
519         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520             YUVA_IN(y, u, v, a, p, pal);
521             u1 = u;
522             v1 = v;
523             a1 = a;
524             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525
526             YUVA_IN(y, u, v, a, p + BPP, pal);
527             u1 += u;
528             v1 += v;
529             a1 += a;
530             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
532             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
533             cb++;
534             cr++;
535             p += 2 * BPP;
536             lum += 2;
537         }
538         if (w) {
539             YUVA_IN(y, u, v, a, p, pal);
540             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
542             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
543             p++;
544             lum++;
545         }
546         p += wrap3 - dstw * BPP;
547         lum += wrap - dstw - dstx;
548         cb += dst->linesize[1] - width2 - skip2;
549         cr += dst->linesize[2] - width2 - skip2;
550     }
551     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
552         lum += dstx;
553         cb += skip2;
554         cr += skip2;
555
556         if (dstx & 1) {
557             YUVA_IN(y, u, v, a, p, pal);
558             u1 = u;
559             v1 = v;
560             a1 = a;
561             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562             p += wrap3;
563             lum += wrap;
564             YUVA_IN(y, u, v, a, p, pal);
565             u1 += u;
566             v1 += v;
567             a1 += a;
568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
570             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
571             cb++;
572             cr++;
573             p += -wrap3 + BPP;
574             lum += -wrap + 1;
575         }
576         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
577             YUVA_IN(y, u, v, a, p, pal);
578             u1 = u;
579             v1 = v;
580             a1 = a;
581             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582
583             YUVA_IN(y, u, v, a, p + BPP, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
588             p += wrap3;
589             lum += wrap;
590
591             YUVA_IN(y, u, v, a, p, pal);
592             u1 += u;
593             v1 += v;
594             a1 += a;
595             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596
597             YUVA_IN(y, u, v, a, p + BPP, pal);
598             u1 += u;
599             v1 += v;
600             a1 += a;
601             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602
603             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
604             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
605
606             cb++;
607             cr++;
608             p += -wrap3 + 2 * BPP;
609             lum += -wrap + 2;
610         }
611         if (w) {
612             YUVA_IN(y, u, v, a, p, pal);
613             u1 = u;
614             v1 = v;
615             a1 = a;
616             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617             p += wrap3;
618             lum += wrap;
619             YUVA_IN(y, u, v, a, p, pal);
620             u1 += u;
621             v1 += v;
622             a1 += a;
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
625             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
626             cb++;
627             cr++;
628             p += -wrap3 + BPP;
629             lum += -wrap + 1;
630         }
631         p += wrap3 + (wrap3 - dstw * BPP);
632         lum += wrap + (wrap - dstw - dstx);
633         cb += dst->linesize[1] - width2 - skip2;
634         cr += dst->linesize[2] - width2 - skip2;
635     }
636     /* handle odd height */
637     if (h) {
638         lum += dstx;
639         cb += skip2;
640         cr += skip2;
641
642         if (dstx & 1) {
643             YUVA_IN(y, u, v, a, p, pal);
644             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
645             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
646             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
647             cb++;
648             cr++;
649             lum++;
650             p += BPP;
651         }
652         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
653             YUVA_IN(y, u, v, a, p, pal);
654             u1 = u;
655             v1 = v;
656             a1 = a;
657             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658
659             YUVA_IN(y, u, v, a, p + BPP, pal);
660             u1 += u;
661             v1 += v;
662             a1 += a;
663             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
664             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
665             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
666             cb++;
667             cr++;
668             p += 2 * BPP;
669             lum += 2;
670         }
671         if (w) {
672             YUVA_IN(y, u, v, a, p, pal);
673             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
675             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
676         }
677     }
678 }
679
680 static void free_subpicture(SubPicture *sp)
681 {
682     avsubtitle_free(&sp->sub);
683 }
684
685 static void video_image_display(VideoState *is)
686 {
687     VideoPicture *vp;
688     SubPicture *sp;
689     AVPicture pict;
690     float aspect_ratio;
691     int width, height, x, y;
692     SDL_Rect rect;
693     int i;
694
695     vp = &is->pictq[is->pictq_rindex];
696     if (vp->bmp) {
697 #if CONFIG_AVFILTER
698          if (vp->picref->video->pixel_aspect.num == 0)
699              aspect_ratio = 0;
700          else
701              aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
702 #else
703
704         /* XXX: use variable in the frame */
705         if (is->video_st->sample_aspect_ratio.num)
706             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
707         else if (is->video_st->codec->sample_aspect_ratio.num)
708             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
709         else
710             aspect_ratio = 0;
711 #endif
712         if (aspect_ratio <= 0.0)
713             aspect_ratio = 1.0;
714         aspect_ratio *= (float)vp->width / (float)vp->height;
715
716         if (is->subtitle_st)
717         {
718             if (is->subpq_size > 0)
719             {
720                 sp = &is->subpq[is->subpq_rindex];
721
722                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
723                 {
724                     SDL_LockYUVOverlay (vp->bmp);
725
726                     pict.data[0] = vp->bmp->pixels[0];
727                     pict.data[1] = vp->bmp->pixels[2];
728                     pict.data[2] = vp->bmp->pixels[1];
729
730                     pict.linesize[0] = vp->bmp->pitches[0];
731                     pict.linesize[1] = vp->bmp->pitches[2];
732                     pict.linesize[2] = vp->bmp->pitches[1];
733
734                     for (i = 0; i < sp->sub.num_rects; i++)
735                         blend_subrect(&pict, sp->sub.rects[i],
736                                       vp->bmp->w, vp->bmp->h);
737
738                     SDL_UnlockYUVOverlay (vp->bmp);
739                 }
740             }
741         }
742
743
744         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
745         height = is->height;
746         width = ((int)rint(height * aspect_ratio)) & ~1;
747         if (width > is->width) {
748             width = is->width;
749             height = ((int)rint(width / aspect_ratio)) & ~1;
750         }
751         x = (is->width - width) / 2;
752         y = (is->height - height) / 2;
753         if (!is->no_background) {
754             /* fill the background */
755             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
756         } else {
757             is->no_background = 0;
758         }
759         rect.x = is->xleft + x;
760         rect.y = is->ytop  + y;
761         rect.w = width;
762         rect.h = height;
763         SDL_DisplayYUVOverlay(vp->bmp, &rect);
764     } else {
765 #if 0
766         fill_rectangle(screen,
767                        is->xleft, is->ytop, is->width, is->height,
768                        QERGB(0x00, 0x00, 0x00));
769 #endif
770     }
771 }
772
773 static inline int compute_mod(int a, int b)
774 {
775     a = a % b;
776     if (a >= 0)
777         return a;
778     else
779         return a + b;
780 }
781
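/*
 * video_audio_display() draws the audio-only visualization.  With
 * show_audio == 1 it plots the raw waveform of each channel from
 * sample_array, starting near the samples currently being played (the
 * data still queued in the audio buffer is subtracted, and a zero
 * crossing is searched for to keep the display stable).  Otherwise it
 * runs an RDFT over the last 2*nb_freq samples and paints one spectrum
 * column per refresh at xpos.
 */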
782 static void video_audio_display(VideoState *s)
783 {
784     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
785     int ch, channels, h, h2, bgcolor, fgcolor;
786     int64_t time_diff;
787     int rdft_bits, nb_freq;
788
789     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
790         ;
791     nb_freq= 1<<(rdft_bits-1);
792
793     /* compute display index : center on currently output samples */
794     channels = s->audio_st->codec->channels;
795     nb_display_channels = channels;
796     if (!s->paused) {
797         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
798         n = 2 * channels;
799         delay = audio_write_get_buf_size(s);
800         delay /= n;
801
802         /* to be more precise, we take into account the time spent since
803            the last buffer computation */
804         if (audio_callback_time) {
805             time_diff = av_gettime() - audio_callback_time;
806             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
807         }
808
809         delay += 2*data_used;
810         if (delay < data_used)
811             delay = data_used;
812
813         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
814         if(s->show_audio==1){
815             h= INT_MIN;
816             for(i=0; i<1000; i+=channels){
817                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
818                 int a= s->sample_array[idx];
819                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
820                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
821                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
822                 int score= a-d;
823                 if(h<score && (b^c)<0){
824                     h= score;
825                     i_start= idx;
826                 }
827             }
828         }
829
830         s->last_i_start = i_start;
831     } else {
832         i_start = s->last_i_start;
833     }
834
835     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
836     if(s->show_audio==1){
837         fill_rectangle(screen,
838                        s->xleft, s->ytop, s->width, s->height,
839                        bgcolor);
840
841         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
842
843         /* total height for one channel */
844         h = s->height / nb_display_channels;
845         /* graph height / 2 */
846         h2 = (h * 9) / 20;
847         for(ch = 0;ch < nb_display_channels; ch++) {
848             i = i_start + ch;
849             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
850             for(x = 0; x < s->width; x++) {
851                 y = (s->sample_array[i] * h2) >> 15;
852                 if (y < 0) {
853                     y = -y;
854                     ys = y1 - y;
855                 } else {
856                     ys = y1;
857                 }
858                 fill_rectangle(screen,
859                                s->xleft + x, ys, 1, y,
860                                fgcolor);
861                 i += channels;
862                 if (i >= SAMPLE_ARRAY_SIZE)
863                     i -= SAMPLE_ARRAY_SIZE;
864             }
865         }
866
867         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
868
869         for(ch = 1;ch < nb_display_channels; ch++) {
870             y = s->ytop + ch * h;
871             fill_rectangle(screen,
872                            s->xleft, y, s->width, 1,
873                            fgcolor);
874         }
875         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
876     }else{
877         nb_display_channels= FFMIN(nb_display_channels, 2);
878         if(rdft_bits != s->rdft_bits){
879             av_rdft_end(s->rdft);
880             av_free(s->rdft_data);
881             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
882             s->rdft_bits= rdft_bits;
883             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
884         }
885         {
886             FFTSample *data[2];
887             for(ch = 0;ch < nb_display_channels; ch++) {
888                 data[ch] = s->rdft_data + 2*nb_freq*ch;
889                 i = i_start + ch;
890                 for(x = 0; x < 2*nb_freq; x++) {
891                     double w= (x-nb_freq)*(1.0/nb_freq);
892                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
893                     i += channels;
894                     if (i >= SAMPLE_ARRAY_SIZE)
895                         i -= SAMPLE_ARRAY_SIZE;
896                 }
897                 av_rdft_calc(s->rdft, data[ch]);
898             }
899             // Least efficient way to do this; we could access the data directly, but it's more than fast enough
900             for(y=0; y<s->height; y++){
901                 double w= 1/sqrt(nb_freq);
902                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
903                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
904                        + data[1][2*y+1]*data[1][2*y+1])) : a;
905                 a= FFMIN(a,255);
906                 b= FFMIN(b,255);
907                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
908
909                 fill_rectangle(screen,
910                             s->xpos, s->height-y, 1, 1,
911                             fgcolor);
912             }
913         }
914         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
915         s->xpos++;
916         if(s->xpos >= s->width)
917             s->xpos= s->xleft;
918     }
919 }
920
921 static int video_open(VideoState *is){
922     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
923     int w,h;
924
925     if(is_full_screen) flags |= SDL_FULLSCREEN;
926     else               flags |= SDL_RESIZABLE;
927
928     if (is_full_screen && fs_screen_width) {
929         w = fs_screen_width;
930         h = fs_screen_height;
931     } else if(!is_full_screen && screen_width){
932         w = screen_width;
933         h = screen_height;
934 #if CONFIG_AVFILTER
935     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
936         w = is->out_video_filter->inputs[0]->w;
937         h = is->out_video_filter->inputs[0]->h;
938 #else
939     }else if (is->video_st && is->video_st->codec->width){
940         w = is->video_st->codec->width;
941         h = is->video_st->codec->height;
942 #endif
943     } else {
944         w = 640;
945         h = 480;
946     }
947     if(screen && is->width == screen->w && screen->w == w
948        && is->height== screen->h && screen->h == h)
949         return 0;
950
951 #ifndef __APPLE__
952     screen = SDL_SetVideoMode(w, h, 0, flags);
953 #else
954     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
955     screen = SDL_SetVideoMode(w, h, 24, flags);
956 #endif
957     if (!screen) {
958         fprintf(stderr, "SDL: could not set video mode - exiting\n");
959         return -1;
960     }
961     if (!window_title)
962         window_title = input_filename;
963     SDL_WM_SetCaption(window_title, window_title);
964
965     is->width = screen->w;
966     is->height = screen->h;
967
968     return 0;
969 }
970
971 /* display the current picture, if any */
972 static void video_display(VideoState *is)
973 {
974     if(!screen)
975         video_open(cur_stream);
976     if (is->audio_st && is->show_audio)
977         video_audio_display(is);
978     else if (is->video_st)
979         video_image_display(is);
980 }
981
982 static int refresh_thread(void *opaque)
983 {
984     VideoState *is= opaque;
985     while(!is->abort_request){
986         SDL_Event event;
987         event.type = FF_REFRESH_EVENT;
988         event.user.data1 = opaque;
989         if(!is->refresh){
990             is->refresh=1;
991             SDL_PushEvent(&event);
992         }
993         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
994     }
995     return 0;
996 }
997
998 /* get the current audio clock value */
999 static double get_audio_clock(VideoState *is)
1000 {
1001     double pts;
1002     int hw_buf_size, bytes_per_sec;
1003     pts = is->audio_clock;
1004     hw_buf_size = audio_write_get_buf_size(is);
1005     bytes_per_sec = 0;
1006     if (is->audio_st) {
1007         bytes_per_sec = is->audio_st->codec->sample_rate *
1008             2 * is->audio_st->codec->channels;
1009     }
1010     if (bytes_per_sec)
1011         pts -= (double)hw_buf_size / bytes_per_sec;
1012     return pts;
1013 }
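/*
 * Note on get_audio_clock() above: is->audio_clock is the pts of the last
 * decoded audio data, so the time actually being heard is obtained by
 * subtracting whatever is still waiting in the buffers
 * (audio_write_get_buf_size()).  The hard-coded "2 *" assumes 16-bit
 * output samples, presumably matching the format requested from SDL
 * elsewhere in the player.
 */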
1014
1015 /* get the current video clock value */
1016 static double get_video_clock(VideoState *is)
1017 {
1018     if (is->paused) {
1019         return is->video_current_pts;
1020     } else {
1021         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1022     }
1023 }
1024
1025 /* get the current external clock value */
1026 static double get_external_clock(VideoState *is)
1027 {
1028     int64_t ti;
1029     ti = av_gettime();
1030     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1031 }
1032
1033 /* get the current master clock value */
1034 static double get_master_clock(VideoState *is)
1035 {
1036     double val;
1037
1038     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1039         if (is->video_st)
1040             val = get_video_clock(is);
1041         else
1042             val = get_audio_clock(is);
1043     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1044         if (is->audio_st)
1045             val = get_audio_clock(is);
1046         else
1047             val = get_video_clock(is);
1048     } else {
1049         val = get_external_clock(is);
1050     }
1051     return val;
1052 }
1053
1054 /* seek in the stream */
1055 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1056 {
1057     if (!is->seek_req) {
1058         is->seek_pos = pos;
1059         is->seek_rel = rel;
1060         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1061         if (seek_by_bytes)
1062             is->seek_flags |= AVSEEK_FLAG_BYTE;
1063         is->seek_req = 1;
1064     }
1065 }
1066
1067 /* pause or resume the video */
1068 static void stream_pause(VideoState *is)
1069 {
1070     if (is->paused) {
1071         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1072         if(is->read_pause_return != AVERROR(ENOSYS)){
1073             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1074         }
1075         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1076     }
1077     is->paused = !is->paused;
1078 }
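/*
 * Note on stream_pause() above: when resuming, frame_timer and the video
 * pts drift are rebased to the current time so that frames queued while
 * paused are not all considered late at once.
 */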
1079
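/*
 * compute_target_time() turns a decoded frame pts into the absolute time
 * (is->frame_timer) at which it should hit the screen.  When video is the
 * slave, the nominal inter-frame delay is nudged by the A-V difference:
 * e.g. with a 40 ms delay and the video 60 ms behind the master clock,
 * diff <= -sync_threshold, so the delay collapses to 0 and the frame is
 * shown immediately; if the video were 60 ms ahead, the delay would be
 * doubled to 80 ms, letting the master clock catch up.
 */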
1080 static double compute_target_time(double frame_current_pts, VideoState *is)
1081 {
1082     double delay, sync_threshold, diff;
1083
1084     /* compute nominal delay */
1085     delay = frame_current_pts - is->frame_last_pts;
1086     if (delay <= 0 || delay >= 10.0) {
1087         /* if incorrect delay, use previous one */
1088         delay = is->frame_last_delay;
1089     } else {
1090         is->frame_last_delay = delay;
1091     }
1092     is->frame_last_pts = frame_current_pts;
1093
1094     /* update delay to follow master synchronisation source */
1095     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1096          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1097         /* if video is slave, we try to correct big delays by
1098            duplicating or deleting a frame */
1099         diff = get_video_clock(is) - get_master_clock(is);
1100
1101         /* skip or repeat frame. We take into account the
1102            delay to compute the threshold. I still don't know
1103            if it is the best guess */
1104         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1105         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1106             if (diff <= -sync_threshold)
1107                 delay = 0;
1108             else if (diff >= sync_threshold)
1109                 delay = 2 * delay;
1110         }
1111     }
1112     is->frame_timer += delay;
1113 #if defined(DEBUG_SYNC)
1114     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1115             delay, frame_current_pts, -diff);
1116 #endif
1117
1118     return is->frame_timer;
1119 }
1120
1121 /* called to display each frame */
1122 static void video_refresh_timer(void *opaque)
1123 {
1124     VideoState *is = opaque;
1125     VideoPicture *vp;
1126
1127     SubPicture *sp, *sp2;
1128
1129     if (is->video_st) {
1130 retry:
1131         if (is->pictq_size == 0) {
1132             // nothing to do, no picture to display in the queue
1133         } else {
1134             double time= av_gettime()/1000000.0;
1135             double next_target;
1136             /* dequeue the picture */
1137             vp = &is->pictq[is->pictq_rindex];
1138
1139             if(time < vp->target_clock)
1140                 return;
1141             /* update current video pts */
1142             is->video_current_pts = vp->pts;
1143             is->video_current_pts_drift = is->video_current_pts - time;
1144             is->video_current_pos = vp->pos;
1145             if(is->pictq_size > 1){
1146                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1147                 assert(nextvp->target_clock >= vp->target_clock);
1148                 next_target= nextvp->target_clock;
1149             }else{
1150                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1151             }
1152             if(framedrop && time > next_target){
1153                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1154                 if(is->pictq_size > 1 || time > next_target + 0.5){
1155                     /* update queue size and signal for next picture */
1156                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1157                         is->pictq_rindex = 0;
1158
1159                     SDL_LockMutex(is->pictq_mutex);
1160                     is->pictq_size--;
1161                     SDL_CondSignal(is->pictq_cond);
1162                     SDL_UnlockMutex(is->pictq_mutex);
1163                     goto retry;
1164                 }
1165             }
1166
1167             if(is->subtitle_st) {
1168                 if (is->subtitle_stream_changed) {
1169                     SDL_LockMutex(is->subpq_mutex);
1170
1171                     while (is->subpq_size) {
1172                         free_subpicture(&is->subpq[is->subpq_rindex]);
1173
1174                         /* update queue size and signal for next picture */
1175                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1176                             is->subpq_rindex = 0;
1177
1178                         is->subpq_size--;
1179                     }
1180                     is->subtitle_stream_changed = 0;
1181
1182                     SDL_CondSignal(is->subpq_cond);
1183                     SDL_UnlockMutex(is->subpq_mutex);
1184                 } else {
1185                     if (is->subpq_size > 0) {
1186                         sp = &is->subpq[is->subpq_rindex];
1187
1188                         if (is->subpq_size > 1)
1189                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1190                         else
1191                             sp2 = NULL;
1192
1193                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1194                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1195                         {
1196                             free_subpicture(sp);
1197
1198                             /* update queue size and signal for next picture */
1199                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1200                                 is->subpq_rindex = 0;
1201
1202                             SDL_LockMutex(is->subpq_mutex);
1203                             is->subpq_size--;
1204                             SDL_CondSignal(is->subpq_cond);
1205                             SDL_UnlockMutex(is->subpq_mutex);
1206                         }
1207                     }
1208                 }
1209             }
1210
1211             /* display picture */
1212             if (!display_disable)
1213                 video_display(is);
1214
1215             /* update queue size and signal for next picture */
1216             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1217                 is->pictq_rindex = 0;
1218
1219             SDL_LockMutex(is->pictq_mutex);
1220             is->pictq_size--;
1221             SDL_CondSignal(is->pictq_cond);
1222             SDL_UnlockMutex(is->pictq_mutex);
1223         }
1224     } else if (is->audio_st) {
1225         /* draw the next audio frame */
1226
1227         /* if there is only an audio stream, then display the audio bars (better
1228            than nothing, just to test the implementation) */
1229
1230         /* display picture */
1231         if (!display_disable)
1232             video_display(is);
1233     }
1234     if (show_status) {
1235         static int64_t last_time;
1236         int64_t cur_time;
1237         int aqsize, vqsize, sqsize;
1238         double av_diff;
1239
1240         cur_time = av_gettime();
1241         if (!last_time || (cur_time - last_time) >= 30000) {
1242             aqsize = 0;
1243             vqsize = 0;
1244             sqsize = 0;
1245             if (is->audio_st)
1246                 aqsize = is->audioq.size;
1247             if (is->video_st)
1248                 vqsize = is->videoq.size;
1249             if (is->subtitle_st)
1250                 sqsize = is->subtitleq.size;
1251             av_diff = 0;
1252             if (is->audio_st && is->video_st)
1253                 av_diff = get_audio_clock(is) - get_video_clock(is);
1254             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1255                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1256             fflush(stdout);
1257             last_time = cur_time;
1258         }
1259     }
1260 }
1261
1262 static void stream_close(VideoState *is)
1263 {
1264     VideoPicture *vp;
1265     int i;
1266     /* XXX: use a special url_shutdown call to abort parse cleanly */
1267     is->abort_request = 1;
1268     SDL_WaitThread(is->parse_tid, NULL);
1269     SDL_WaitThread(is->refresh_tid, NULL);
1270
1271     /* free all pictures */
1272     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1273         vp = &is->pictq[i];
1274 #if CONFIG_AVFILTER
1275         if (vp->picref) {
1276             avfilter_unref_buffer(vp->picref);
1277             vp->picref = NULL;
1278         }
1279 #endif
1280         if (vp->bmp) {
1281             SDL_FreeYUVOverlay(vp->bmp);
1282             vp->bmp = NULL;
1283         }
1284     }
1285     SDL_DestroyMutex(is->pictq_mutex);
1286     SDL_DestroyCond(is->pictq_cond);
1287     SDL_DestroyMutex(is->subpq_mutex);
1288     SDL_DestroyCond(is->subpq_cond);
1289 #if !CONFIG_AVFILTER
1290     if (is->img_convert_ctx)
1291         sws_freeContext(is->img_convert_ctx);
1292 #endif
1293     av_free(is);
1294 }
1295
1296 static void do_exit(void)
1297 {
1298     if (cur_stream) {
1299         stream_close(cur_stream);
1300         cur_stream = NULL;
1301     }
1302     uninit_opts();
1303 #if CONFIG_AVFILTER
1304     avfilter_uninit();
1305 #endif
1306     if (show_status)
1307         printf("\n");
1308     SDL_Quit();
1309     av_log(NULL, AV_LOG_QUIET, "");
1310     exit(0);
1311 }
1312
1313 /* allocate a picture (this needs to be done in the main thread to avoid
1314    potential locking problems) */
1315 static void alloc_picture(void *opaque)
1316 {
1317     VideoState *is = opaque;
1318     VideoPicture *vp;
1319
1320     vp = &is->pictq[is->pictq_windex];
1321
1322     if (vp->bmp)
1323         SDL_FreeYUVOverlay(vp->bmp);
1324
1325 #if CONFIG_AVFILTER
1326     if (vp->picref)
1327         avfilter_unref_buffer(vp->picref);
1328     vp->picref = NULL;
1329
1330     vp->width   = is->out_video_filter->inputs[0]->w;
1331     vp->height  = is->out_video_filter->inputs[0]->h;
1332     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1333 #else
1334     vp->width   = is->video_st->codec->width;
1335     vp->height  = is->video_st->codec->height;
1336     vp->pix_fmt = is->video_st->codec->pix_fmt;
1337 #endif
1338
1339     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1340                                    SDL_YV12_OVERLAY,
1341                                    screen);
1342     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1343         /* SDL allocates a buffer smaller than requested if the video
1344          * overlay hardware is unable to support the requested size. */
1345         fprintf(stderr, "Error: the video system does not support an image\n"
1346                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1347                         "to reduce the image size.\n", vp->width, vp->height );
1348         do_exit();
1349     }
1350
1351     SDL_LockMutex(is->pictq_mutex);
1352     vp->allocated = 1;
1353     SDL_CondSignal(is->pictq_cond);
1354     SDL_UnlockMutex(is->pictq_mutex);
1355 }
1356
1357 /**
1358  * Queue a decoded picture for display.
1359  * @param pts the dts of the pkt / pts of the frame, guessed if it is not known
1360  */
1361 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1362 {
1363     VideoPicture *vp;
1364     int dst_pix_fmt;
1365 #if CONFIG_AVFILTER
1366     AVPicture pict_src;
1367 #endif
1368     /* wait until we have space to put a new picture */
1369     SDL_LockMutex(is->pictq_mutex);
1370
1371     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1372         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1373
1374     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1375            !is->videoq.abort_request) {
1376         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1377     }
1378     SDL_UnlockMutex(is->pictq_mutex);
1379
1380     if (is->videoq.abort_request)
1381         return -1;
1382
1383     vp = &is->pictq[is->pictq_windex];
1384
1385     /* alloc or resize hardware picture buffer */
1386     if (!vp->bmp ||
1387 #if CONFIG_AVFILTER
1388         vp->width  != is->out_video_filter->inputs[0]->w ||
1389         vp->height != is->out_video_filter->inputs[0]->h) {
1390 #else
1391         vp->width != is->video_st->codec->width ||
1392         vp->height != is->video_st->codec->height) {
1393 #endif
1394         SDL_Event event;
1395
1396         vp->allocated = 0;
1397
1398         /* the allocation must be done in the main thread to avoid
1399            locking problems */
1400         event.type = FF_ALLOC_EVENT;
1401         event.user.data1 = is;
1402         SDL_PushEvent(&event);
1403
1404         /* wait until the picture is allocated */
1405         SDL_LockMutex(is->pictq_mutex);
1406         while (!vp->allocated && !is->videoq.abort_request) {
1407             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1408         }
1409         SDL_UnlockMutex(is->pictq_mutex);
1410
1411         if (is->videoq.abort_request)
1412             return -1;
1413     }
1414
1415     /* if the frame is not skipped, then display it */
1416     if (vp->bmp) {
1417         AVPicture pict;
1418 #if CONFIG_AVFILTER
1419         if(vp->picref)
1420             avfilter_unref_buffer(vp->picref);
1421         vp->picref = src_frame->opaque;
1422 #endif
1423
1424         /* get a pointer on the bitmap */
1425         SDL_LockYUVOverlay (vp->bmp);
1426
1427         dst_pix_fmt = PIX_FMT_YUV420P;
1428         memset(&pict,0,sizeof(AVPicture));
1429         pict.data[0] = vp->bmp->pixels[0];
1430         pict.data[1] = vp->bmp->pixels[2];
1431         pict.data[2] = vp->bmp->pixels[1];
1432
1433         pict.linesize[0] = vp->bmp->pitches[0];
1434         pict.linesize[1] = vp->bmp->pitches[2];
1435         pict.linesize[2] = vp->bmp->pitches[1];
1436
1437 #if CONFIG_AVFILTER
1438         pict_src.data[0] = src_frame->data[0];
1439         pict_src.data[1] = src_frame->data[1];
1440         pict_src.data[2] = src_frame->data[2];
1441
1442         pict_src.linesize[0] = src_frame->linesize[0];
1443         pict_src.linesize[1] = src_frame->linesize[1];
1444         pict_src.linesize[2] = src_frame->linesize[2];
1445
1446         //FIXME use direct rendering
1447         av_picture_copy(&pict, &pict_src,
1448                         vp->pix_fmt, vp->width, vp->height);
1449 #else
1450         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1451         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1452             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1453             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1454         if (is->img_convert_ctx == NULL) {
1455             fprintf(stderr, "Cannot initialize the conversion context\n");
1456             exit(1);
1457         }
1458         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1459                   0, vp->height, pict.data, pict.linesize);
1460 #endif
1461         /* update the bitmap content */
1462         SDL_UnlockYUVOverlay(vp->bmp);
1463
1464         vp->pts = pts;
1465         vp->pos = pos;
1466
1467         /* now we can update the picture count */
1468         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1469             is->pictq_windex = 0;
1470         SDL_LockMutex(is->pictq_mutex);
1471         vp->target_clock= compute_target_time(vp->pts, is);
1472
1473         is->pictq_size++;
1474         SDL_UnlockMutex(is->pictq_mutex);
1475     }
1476     return 0;
1477 }
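/*
 * Note on queue_picture() above: the decoder thread blocks on pictq_cond
 * until the refresh side has consumed a slot, asks the main thread (via
 * FF_ALLOC_EVENT) to (re)create the SDL overlay when the size changes,
 * then either copies the filtered picture or runs swscale into the YV12
 * overlay before publishing the slot under pictq_mutex.
 */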
1478
1479 /**
1480  * compute the exact PTS for the picture if it is omitted in the stream
1481  * @param pts1 the dts of the pkt / pts of the frame
1482  */
1483 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1484 {
1485     double frame_delay, pts;
1486
1487     pts = pts1;
1488
1489     if (pts != 0) {
1490         /* update video clock with pts, if present */
1491         is->video_clock = pts;
1492     } else {
1493         pts = is->video_clock;
1494     }
1495     /* update video clock for next frame */
1496     frame_delay = av_q2d(is->video_st->codec->time_base);
1497     /* for MPEG2, the frame can be repeated, so we update the
1498        clock accordingly */
1499     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1500     is->video_clock += frame_delay;
1501
1502 #if defined(DEBUG_SYNC) && 0
1503     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1504            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1505 #endif
1506     return queue_picture(is, src_frame, pts, pos);
1507 }
1508
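/*
 * get_video_frame() pulls one packet from the video queue and decodes it.
 * A flush_pkt (queued on seek) resets the decoder, the pts correction
 * state and the frame timer instead of producing a picture.  skip_frames
 * implements ratio-based frame dropping: the function returns 1 (frame to
 * be displayed) only once every is->skip_frames decoded frames when
 * playback is running late.
 */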
1509 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1510 {
1511     int len1, got_picture, i;
1512
1513     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1514         return -1;
1515
1516     if (pkt->data == flush_pkt.data) {
1517         avcodec_flush_buffers(is->video_st->codec);
1518
1519         SDL_LockMutex(is->pictq_mutex);
1520         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1521         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1522             is->pictq[i].target_clock= 0;
1523         }
1524         while (is->pictq_size && !is->videoq.abort_request) {
1525             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1526         }
1527         is->video_current_pos = -1;
1528         SDL_UnlockMutex(is->pictq_mutex);
1529
1530         init_pts_correction(&is->pts_ctx);
1531         is->frame_last_pts = AV_NOPTS_VALUE;
1532         is->frame_last_delay = 0;
1533         is->frame_timer = (double)av_gettime() / 1000000.0;
1534         is->skip_frames = 1;
1535         is->skip_frames_index = 0;
1536         return 0;
1537     }
1538
1539     len1 = avcodec_decode_video2(is->video_st->codec,
1540                                  frame, &got_picture,
1541                                  pkt);
1542
1543     if (got_picture) {
1544         if (decoder_reorder_pts == -1) {
1545             *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
1546         } else if (decoder_reorder_pts) {
1547             *pts = frame->pkt_pts;
1548         } else {
1549             *pts = frame->pkt_dts;
1550         }
1551
1552         if (*pts == AV_NOPTS_VALUE) {
1553             *pts = 0;
1554         }
1555
1556         is->skip_frames_index += 1;
1557         if(is->skip_frames_index >= is->skip_frames){
1558             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1559             return 1;
1560         }
1561
1562     }
1563     return 0;
1564 }
1565
1566 #if CONFIG_AVFILTER
1567 typedef struct {
1568     VideoState *is;
1569     AVFrame *frame;
1570     int use_dr1;
1571 } FilterPriv;
1572
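/*
 * Direct-rendering glue for the ffplay input source filter: when the
 * decoder advertises CODEC_CAP_DR1, the get/release/reget_buffer
 * callbacks are redirected (see input_init()) so that frames are decoded
 * straight into buffers obtained from the filter graph via
 * avfilter_get_video_buffer(), avoiding a copy in input_request_frame().
 * The edge offset below reserves the padding the decoder may need around
 * the visible picture.
 */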
1573 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1574 {
1575     AVFilterContext *ctx = codec->opaque;
1576     AVFilterBufferRef  *ref;
1577     int perms = AV_PERM_WRITE;
1578     int i, w, h, stride[4];
1579     unsigned edge;
1580
1581     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1582         perms |= AV_PERM_NEG_LINESIZES;
1583
1584     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1585         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1586         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1587         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1588     }
1589     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1590
1591     w = codec->width;
1592     h = codec->height;
1593     avcodec_align_dimensions2(codec, &w, &h, stride);
1594     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1595     w += edge << 1;
1596     h += edge << 1;
1597
1598     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1599         return -1;
1600
1601     ref->video->w = codec->width;
1602     ref->video->h = codec->height;
1603     for(i = 0; i < 4; i ++) {
1604         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1605         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1606
1607         if (ref->data[i]) {
1608             ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1609         }
1610         pic->data[i]     = ref->data[i];
1611         pic->linesize[i] = ref->linesize[i];
1612     }
1613     pic->opaque = ref;
1614     pic->age    = INT_MAX;
1615     pic->type   = FF_BUFFER_TYPE_USER;
1616     pic->reordered_opaque = codec->reordered_opaque;
1617     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1618     else           pic->pkt_pts = AV_NOPTS_VALUE;
1619     return 0;
1620 }
1621
1622 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1623 {
1624     memset(pic->data, 0, sizeof(pic->data));
1625     avfilter_unref_buffer(pic->opaque);
1626 }
1627
1628 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1629 {
1630     AVFilterBufferRef *ref = pic->opaque;
1631
1632     if (pic->data[0] == NULL) {
1633         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1634         return codec->get_buffer(codec, pic);
1635     }
1636
1637     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1638         (codec->pix_fmt != ref->format)) {
1639         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1640         return -1;
1641     }
1642
1643     pic->reordered_opaque = codec->reordered_opaque;
1644     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1645     else           pic->pkt_pts = AV_NOPTS_VALUE;
1646     return 0;
1647 }
1648
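/* init callback of the source filter: remember the VideoState and, when the
   decoder supports CODEC_CAP_DR1, install the buffer callbacks above */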
1649 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1650 {
1651     FilterPriv *priv = ctx->priv;
1652     AVCodecContext *codec;
1653     if(!opaque) return -1;
1654
1655     priv->is = opaque;
1656     codec    = priv->is->video_st->codec;
1657     codec->opaque = ctx;
1658     if(codec->codec->capabilities & CODEC_CAP_DR1) {
1659         priv->use_dr1 = 1;
1660         codec->get_buffer     = input_get_buffer;
1661         codec->release_buffer = input_release_buffer;
1662         codec->reget_buffer   = input_reget_buffer;
1663         codec->thread_safe_callbacks = 1;
1664     }
1665
1666     priv->frame = avcodec_alloc_frame();
1667
1668     return 0;
1669 }
1670
1671 static void input_uninit(AVFilterContext *ctx)
1672 {
1673     FilterPriv *priv = ctx->priv;
1674     av_free(priv->frame);
1675 }
1676
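/* request_frame callback: decode video until a frame is available, wrap it in
   a buffer reference (reusing the DR1 buffer or copying the data) and push it
   to the output link */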
1677 static int input_request_frame(AVFilterLink *link)
1678 {
1679     FilterPriv *priv = link->src->priv;
1680     AVFilterBufferRef *picref;
1681     int64_t pts = 0;
1682     AVPacket pkt;
1683     int ret;
1684
1685     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1686         av_free_packet(&pkt);
1687     if (ret < 0)
1688         return -1;
1689
1690     if(priv->use_dr1) {
1691         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1692     } else {
1693         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1694         av_image_copy(picref->data, picref->linesize,
1695                       priv->frame->data, priv->frame->linesize,
1696                       picref->format, link->w, link->h);
1697     }
1698     av_free_packet(&pkt);
1699
1700     picref->pts = pts;
1701     picref->pos = pkt.pos;
1702     picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1703     avfilter_start_frame(link, picref);
1704     avfilter_draw_slice(link, 0, link->h, 1);
1705     avfilter_end_frame(link);
1706
1707     return 0;
1708 }
1709
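/* the source filter only outputs the pixel format produced by the decoder */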
1710 static int input_query_formats(AVFilterContext *ctx)
1711 {
1712     FilterPriv *priv = ctx->priv;
1713     enum PixelFormat pix_fmts[] = {
1714         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1715     };
1716
1717     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1718     return 0;
1719 }
1720
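/* propagate the decoder dimensions and the stream time base to the output link */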
1721 static int input_config_props(AVFilterLink *link)
1722 {
1723     FilterPriv *priv  = link->src->priv;
1724     AVCodecContext *c = priv->is->video_st->codec;
1725
1726     link->w = c->width;
1727     link->h = c->height;
1728     link->time_base = priv->is->video_st->time_base;
1729
1730     return 0;
1731 }
1732
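/* source filter feeding decoded frames from the video stream into the graph;
   it has no inputs and a single video output */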
1733 static AVFilter input_filter =
1734 {
1735     .name      = "ffplay_input",
1736
1737     .priv_size = sizeof(FilterPriv),
1738
1739     .init      = input_init,
1740     .uninit    = input_uninit,
1741
1742     .query_formats = input_query_formats,
1743
1744     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1745     .outputs   = (AVFilterPad[]) {{ .name = "default",
1746                                     .type = AVMEDIA_TYPE_VIDEO,
1747                                     .request_frame = input_request_frame,
1748                                     .config_props  = input_config_props, },
1749                                   { .name = NULL }},
1750 };
1751
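/* build the video filter graph: ffplay source -> optional user filters (-vf)
   -> ffsink, and remember the sink as the frame output */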
1752 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1753 {
1754     char sws_flags_str[128];
1755     int ret;
1756     FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1757     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1758     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1759     graph->scale_sws_opts = av_strdup(sws_flags_str);
1760
1761     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1762                                             NULL, is, graph)) < 0)
1763         goto the_end;
1764     if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1765                                             NULL, &ffsink_ctx, graph)) < 0)
1766         goto the_end;
1767
1768     if(vfilters) {
1769         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1770         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1771
1772         outputs->name    = av_strdup("in");
1773         outputs->filter_ctx = filt_src;
1774         outputs->pad_idx = 0;
1775         outputs->next    = NULL;
1776
1777         inputs->name    = av_strdup("out");
1778         inputs->filter_ctx = filt_out;
1779         inputs->pad_idx = 0;
1780         inputs->next    = NULL;
1781
1782         if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1783             goto the_end;
1784         av_freep(&vfilters);
1785     } else {
1786         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1787             goto the_end;
1788     }
1789
1790     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1791         goto the_end;
1792
1793     is->out_video_filter = filt_out;
1794 the_end:
1795     return ret;
1796 }
1797
1798 #endif  /* CONFIG_AVFILTER */
1799
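/* video decoding thread: pull decoded (and, with CONFIG_AVFILTER, filtered)
   frames, convert their pts to seconds and queue them for display */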
1800 static int video_thread(void *arg)
1801 {
1802     VideoState *is = arg;
1803     AVFrame *frame= avcodec_alloc_frame();
1804     int64_t pts_int;
1805     double pts;
1806     int ret;
1807
1808 #if CONFIG_AVFILTER
1809     AVFilterGraph *graph = avfilter_graph_alloc();
1810     AVFilterContext *filt_out = NULL;
1811     int64_t pos;
1812
1813     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1814         goto the_end;
1815     filt_out = is->out_video_filter;
1816 #endif
1817
1818     for(;;) {
1819 #if !CONFIG_AVFILTER
1820         AVPacket pkt;
1821 #else
1822         AVFilterBufferRef *picref;
1823         AVRational tb;
1824 #endif
1825         while (is->paused && !is->videoq.abort_request)
1826             SDL_Delay(10);
1827 #if CONFIG_AVFILTER
1828         ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1829         if (picref) {
1830             pts_int = picref->pts;
1831             pos     = picref->pos;
1832             frame->opaque = picref;
1833         }
1834
1835         if (av_cmp_q(tb, is->video_st->time_base)) {
1836             av_unused int64_t pts1 = pts_int;
1837             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1838             av_dlog(NULL, "video_thread(): "
1839                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1840                     tb.num, tb.den, pts1,
1841                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1842         }
1843 #else
1844         ret = get_video_frame(is, frame, &pts_int, &pkt);
1845 #endif
1846
1847         if (ret < 0) goto the_end;
1848
1849         if (!ret)
1850             continue;
1851
1852         pts = pts_int*av_q2d(is->video_st->time_base);
1853
1854 #if CONFIG_AVFILTER
1855         ret = output_picture2(is, frame, pts, pos);
1856 #else
1857         ret = output_picture2(is, frame, pts,  pkt.pos);
1858         av_free_packet(&pkt);
1859 #endif
1860         if (ret < 0)
1861             goto the_end;
1862
1863         if (step)
1864             if (cur_stream)
1865                 stream_pause(cur_stream);
1866     }
1867  the_end:
1868 #if CONFIG_AVFILTER
1869     avfilter_graph_free(&graph);
1870 #endif
1871     av_free(frame);
1872     return 0;
1873 }
1874
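/* subtitle decoding thread: decode subtitle packets, convert bitmap palettes
   from RGBA to YUVA and queue the result for display */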
1875 static int subtitle_thread(void *arg)
1876 {
1877     VideoState *is = arg;
1878     SubPicture *sp;
1879     AVPacket pkt1, *pkt = &pkt1;
1880     int len1, got_subtitle;
1881     double pts;
1882     int i, j;
1883     int r, g, b, y, u, v, a;
1884
1885     for(;;) {
1886         while (is->paused && !is->subtitleq.abort_request) {
1887             SDL_Delay(10);
1888         }
1889         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1890             break;
1891
1892         if(pkt->data == flush_pkt.data){
1893             avcodec_flush_buffers(is->subtitle_st->codec);
1894             continue;
1895         }
1896         SDL_LockMutex(is->subpq_mutex);
1897         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1898                !is->subtitleq.abort_request) {
1899             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1900         }
1901         SDL_UnlockMutex(is->subpq_mutex);
1902
1903         if (is->subtitleq.abort_request)
1904             goto the_end;
1905
1906         sp = &is->subpq[is->subpq_windex];
1907
1908         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1909            this packet, if any */
1910         pts = 0;
1911         if (pkt->pts != AV_NOPTS_VALUE)
1912             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1913
1914         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1915                                     &sp->sub, &got_subtitle,
1916                                     pkt);
1917 //            if (len1 < 0)
1918 //                break;
1919         if (got_subtitle && sp->sub.format == 0) {
1920             sp->pts = pts;
1921
1922             for (i = 0; i < sp->sub.num_rects; i++)
1923             {
1924                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1925                 {
1926                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1927                     y = RGB_TO_Y_CCIR(r, g, b);
1928                     u = RGB_TO_U_CCIR(r, g, b, 0);
1929                     v = RGB_TO_V_CCIR(r, g, b, 0);
1930                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1931                 }
1932             }
1933
1934             /* now we can update the picture count */
1935             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1936                 is->subpq_windex = 0;
1937             SDL_LockMutex(is->subpq_mutex);
1938             is->subpq_size++;
1939             SDL_UnlockMutex(is->subpq_mutex);
1940         }
1941         av_free_packet(pkt);
1942 //        if (step)
1943 //            if (cur_stream)
1944 //                stream_pause(cur_stream);
1945     }
1946  the_end:
1947     return 0;
1948 }
1949
1950 /* copy samples for the audio waveform/spectrum display */
1951 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1952 {
1953     int size, len, channels;
1954
1955     channels = is->audio_st->codec->channels;
1956
1957     size = samples_size / sizeof(short);
1958     while (size > 0) {
1959         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1960         if (len > size)
1961             len = size;
1962         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1963         samples += len;
1964         is->sample_array_index += len;
1965         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1966             is->sample_array_index = 0;
1967         size -= len;
1968     }
1969 }
1970
1971 /* return the new audio buffer size (samples can be added or removed
1972    to get better sync when video or an external clock is the master) */
1973 static int synchronize_audio(VideoState *is, short *samples,
1974                              int samples_size1, double pts)
1975 {
1976     int n, samples_size;
1977     double ref_clock;
1978
1979     n = 2 * is->audio_st->codec->channels;
1980     samples_size = samples_size1;
1981
1982     /* if not master, then we try to remove or add samples to correct the clock */
1983     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1984          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1985         double diff, avg_diff;
1986         int wanted_size, min_size, max_size, nb_samples;
1987
1988         ref_clock = get_master_clock(is);
1989         diff = get_audio_clock(is) - ref_clock;
1990
1991         if (diff < AV_NOSYNC_THRESHOLD) {
1992             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1993             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1994                 /* not enough measures to have a correct estimate */
1995                 is->audio_diff_avg_count++;
1996             } else {
1997                 /* estimate the A-V difference */
1998                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1999
2000                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
2001                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2002                     nb_samples = samples_size / n;
2003
2004                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2005                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2006                     if (wanted_size < min_size)
2007                         wanted_size = min_size;
2008                     else if (wanted_size > max_size)
2009                         wanted_size = max_size;
2010
2011                     /* add or remove samples to correct the synchronization */
2012                     if (wanted_size < samples_size) {
2013                         /* remove samples */
2014                         samples_size = wanted_size;
2015                     } else if (wanted_size > samples_size) {
2016                         uint8_t *samples_end, *q;
2017                         int nb;
2018
2019                         /* add samples by repeating the last sample */
2020                         nb = wanted_size - samples_size;
2021                         samples_end = (uint8_t *)samples + samples_size - n;
2022                         q = samples_end + n;
2023                         while (nb > 0) {
2024                             memcpy(q, samples_end, n);
2025                             q += n;
2026                             nb -= n;
2027                         }
2028                         samples_size = wanted_size;
2029                     }
2030                 }
2031 #if 0
2032                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2033                        diff, avg_diff, samples_size - samples_size1,
2034                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
2035 #endif
2036             }
2037         } else {
2038             /* too big a difference: probably initial PTS errors, so
2039                reset the A-V averaging filter */
2040             is->audio_diff_avg_count = 0;
2041             is->audio_diff_cum = 0;
2042         }
2043     }
2044
2045     return samples_size;
2046 }
2047
2048 /* decode one audio frame and return its uncompressed size in bytes */
2049 static int audio_decode_frame(VideoState *is, double *pts_ptr)
2050 {
2051     AVPacket *pkt_temp = &is->audio_pkt_temp;
2052     AVPacket *pkt = &is->audio_pkt;
2053     AVCodecContext *dec= is->audio_st->codec;
2054     int n, len1, data_size;
2055     double pts;
2056
2057     for(;;) {
2058         /* NOTE: the audio packet can contain several frames */
2059         while (pkt_temp->size > 0) {
2060             data_size = sizeof(is->audio_buf1);
2061             len1 = avcodec_decode_audio3(dec,
2062                                         (int16_t *)is->audio_buf1, &data_size,
2063                                         pkt_temp);
2064             if (len1 < 0) {
2065                 /* if error, we skip the frame */
2066                 pkt_temp->size = 0;
2067                 break;
2068             }
2069
2070             pkt_temp->data += len1;
2071             pkt_temp->size -= len1;
2072             if (data_size <= 0)
2073                 continue;
2074
2075             if (dec->sample_fmt != is->audio_src_fmt) {
2076                 if (is->reformat_ctx)
2077                     av_audio_convert_free(is->reformat_ctx);
2078                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2079                                                          dec->sample_fmt, 1, NULL, 0);
2080                 if (!is->reformat_ctx) {
2081                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2082                         av_get_sample_fmt_name(dec->sample_fmt),
2083                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2084                     break;
2085                 }
2086                 is->audio_src_fmt= dec->sample_fmt;
2087             }
2088
2089             if (is->reformat_ctx) {
2090                 const void *ibuf[6]= {is->audio_buf1};
2091                 void *obuf[6]= {is->audio_buf2};
2092                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2093                 int ostride[6]= {2};
2094                 int len= data_size/istride[0];
2095                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2096                     printf("av_audio_convert() failed\n");
2097                     break;
2098                 }
2099                 is->audio_buf= is->audio_buf2;
2100                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2101                           remove this legacy cruft */
2102                 data_size= len*2;
2103             }else{
2104                 is->audio_buf= is->audio_buf1;
2105             }
2106
2107             /* use the running audio clock as the pts of this frame */
2108             pts = is->audio_clock;
2109             *pts_ptr = pts;
2110             n = 2 * dec->channels;
2111             is->audio_clock += (double)data_size /
2112                 (double)(n * dec->sample_rate);
2113 #if defined(DEBUG_SYNC)
2114             {
2115                 static double last_clock;
2116                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2117                        is->audio_clock - last_clock,
2118                        is->audio_clock, pts);
2119                 last_clock = is->audio_clock;
2120             }
2121 #endif
2122             return data_size;
2123         }
2124
2125         /* free the current packet */
2126         if (pkt->data)
2127             av_free_packet(pkt);
2128
2129         if (is->paused || is->audioq.abort_request) {
2130             return -1;
2131         }
2132
2133         /* read next packet */
2134         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2135             return -1;
2136         if(pkt->data == flush_pkt.data){
2137             avcodec_flush_buffers(dec);
2138             continue;
2139         }
2140
2141         pkt_temp->data = pkt->data;
2142         pkt_temp->size = pkt->size;
2143
2144         /* update the audio clock with the packet pts, if available */
2145         if (pkt->pts != AV_NOPTS_VALUE) {
2146             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2147         }
2148     }
2149 }
2150
2151 /* get the amount of data remaining in the audio output buffer, in bytes.
2152    With SDL we cannot get precise hardware buffer fullness information */
2153 static int audio_write_get_buf_size(VideoState *is)
2154 {
2155     return is->audio_buf_size - is->audio_buf_index;
2156 }
2157
2158
2159 /* prepare a new audio buffer */
2160 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2161 {
2162     VideoState *is = opaque;
2163     int audio_size, len1;
2164     double pts;
2165
2166     audio_callback_time = av_gettime();
2167
2168     while (len > 0) {
2169         if (is->audio_buf_index >= is->audio_buf_size) {
2170            audio_size = audio_decode_frame(is, &pts);
2171            if (audio_size < 0) {
2172                 /* if error, just output silence */
2173                is->audio_buf = is->audio_buf1;
2174                is->audio_buf_size = 1024;
2175                memset(is->audio_buf, 0, is->audio_buf_size);
2176            } else {
2177                if (is->show_audio)
2178                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2179                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2180                                               pts);
2181                is->audio_buf_size = audio_size;
2182            }
2183            is->audio_buf_index = 0;
2184         }
2185         len1 = is->audio_buf_size - is->audio_buf_index;
2186         if (len1 > len)
2187             len1 = len;
2188         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2189         len -= len1;
2190         stream += len1;
2191         is->audio_buf_index += len1;
2192     }
2193 }
2194
2195 /* open a given stream. Return 0 if OK */
2196 static int stream_component_open(VideoState *is, int stream_index)
2197 {
2198     AVFormatContext *ic = is->ic;
2199     AVCodecContext *avctx;
2200     AVCodec *codec;
2201     SDL_AudioSpec wanted_spec, spec;
2202
2203     if (stream_index < 0 || stream_index >= ic->nb_streams)
2204         return -1;
2205     avctx = ic->streams[stream_index]->codec;
2206
2207     /* ask the decoder for at most 2 audio channels */
2208     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2209         if (avctx->channels > 0) {
2210             avctx->request_channels = FFMIN(2, avctx->channels);
2211         } else {
2212             avctx->request_channels = 2;
2213         }
2214     }
2215
2216     codec = avcodec_find_decoder(avctx->codec_id);
2217     avctx->debug_mv = debug_mv;
2218     avctx->debug = debug;
2219     avctx->workaround_bugs = workaround_bugs;
2220     avctx->lowres = lowres;
2221     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2222     avctx->idct_algo= idct;
2223     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2224     avctx->skip_frame= skip_frame;
2225     avctx->skip_idct= skip_idct;
2226     avctx->skip_loop_filter= skip_loop_filter;
2227     avctx->error_recognition= error_recognition;
2228     avctx->error_concealment= error_concealment;
2229     avctx->thread_count= thread_count;
2230
2231     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2232
2233     if (!codec ||
2234         avcodec_open(avctx, codec) < 0)
2235         return -1;
2236
2237     /* prepare audio output */
2238     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2239         wanted_spec.freq = avctx->sample_rate;
2240         wanted_spec.format = AUDIO_S16SYS;
2241         wanted_spec.channels = avctx->channels;
2242         wanted_spec.silence = 0;
2243         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2244         wanted_spec.callback = sdl_audio_callback;
2245         wanted_spec.userdata = is;
2246         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2247             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2248             return -1;
2249         }
2250         is->audio_hw_buf_size = spec.size;
2251         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2252     }
2253
2254     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2255     switch(avctx->codec_type) {
2256     case AVMEDIA_TYPE_AUDIO:
2257         is->audio_stream = stream_index;
2258         is->audio_st = ic->streams[stream_index];
2259         is->audio_buf_size = 0;
2260         is->audio_buf_index = 0;
2261
2262         /* init averaging filter */
2263         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2264         is->audio_diff_avg_count = 0;
2265         /* since we do not have precise enough information about the audio
2266            FIFO fullness, we correct audio sync only above this threshold */
2267         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2268
2269         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2270         packet_queue_init(&is->audioq);
2271         SDL_PauseAudio(0);
2272         break;
2273     case AVMEDIA_TYPE_VIDEO:
2274         is->video_stream = stream_index;
2275         is->video_st = ic->streams[stream_index];
2276
2277 //        is->video_current_pts_time = av_gettime();
2278
2279         packet_queue_init(&is->videoq);
2280         is->video_tid = SDL_CreateThread(video_thread, is);
2281         break;
2282     case AVMEDIA_TYPE_SUBTITLE:
2283         is->subtitle_stream = stream_index;
2284         is->subtitle_st = ic->streams[stream_index];
2285         packet_queue_init(&is->subtitleq);
2286
2287         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2288         break;
2289     default:
2290         break;
2291     }
2292     return 0;
2293 }
2294
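/* close a stream component: abort and free its packet queue, stop the
   associated decoding thread (or the SDL audio device), close the codec and
   reset the stream state */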
2295 static void stream_component_close(VideoState *is, int stream_index)
2296 {
2297     AVFormatContext *ic = is->ic;
2298     AVCodecContext *avctx;
2299
2300     if (stream_index < 0 || stream_index >= ic->nb_streams)
2301         return;
2302     avctx = ic->streams[stream_index]->codec;
2303
2304     switch(avctx->codec_type) {
2305     case AVMEDIA_TYPE_AUDIO:
2306         packet_queue_abort(&is->audioq);
2307
2308         SDL_CloseAudio();
2309
2310         packet_queue_end(&is->audioq);
2311         if (is->reformat_ctx)
2312             av_audio_convert_free(is->reformat_ctx);
2313         is->reformat_ctx = NULL;
2314         break;
2315     case AVMEDIA_TYPE_VIDEO:
2316         packet_queue_abort(&is->videoq);
2317
2318         /* note: we also signal the condition variable to make sure the
2319            video thread is unblocked in all cases */
2320         SDL_LockMutex(is->pictq_mutex);
2321         SDL_CondSignal(is->pictq_cond);
2322         SDL_UnlockMutex(is->pictq_mutex);
2323
2324         SDL_WaitThread(is->video_tid, NULL);
2325
2326         packet_queue_end(&is->videoq);
2327         break;
2328     case AVMEDIA_TYPE_SUBTITLE:
2329         packet_queue_abort(&is->subtitleq);
2330
2331         /* note: we also signal the condition variable to make sure the
2332            subtitle thread is unblocked in all cases */
2333         SDL_LockMutex(is->subpq_mutex);
2334         is->subtitle_stream_changed = 1;
2335
2336         SDL_CondSignal(is->subpq_cond);
2337         SDL_UnlockMutex(is->subpq_mutex);
2338
2339         SDL_WaitThread(is->subtitle_tid, NULL);
2340
2341         packet_queue_end(&is->subtitleq);
2342         break;
2343     default:
2344         break;
2345     }
2346
2347     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2348     avcodec_close(avctx);
2349     switch(avctx->codec_type) {
2350     case AVMEDIA_TYPE_AUDIO:
2351         is->audio_st = NULL;
2352         is->audio_stream = -1;
2353         break;
2354     case AVMEDIA_TYPE_VIDEO:
2355         is->video_st = NULL;
2356         is->video_stream = -1;
2357         break;
2358     case AVMEDIA_TYPE_SUBTITLE:
2359         is->subtitle_st = NULL;
2360         is->subtitle_stream = -1;
2361         break;
2362     default:
2363         break;
2364     }
2365 }
2366
2367 /* since we have only one decoding thread, we can use a global
2368    variable instead of a thread local variable */
2369 static VideoState *global_video_state;
2370
2371 static int decode_interrupt_cb(void)
2372 {
2373     return (global_video_state && global_video_state->abort_request);
2374 }
2375
2376 /* this thread gets the stream from the disk or the network */
2377 static int decode_thread(void *arg)
2378 {
2379     VideoState *is = arg;
2380     AVFormatContext *ic;
2381     int err, i, ret;
2382     int st_index[AVMEDIA_TYPE_NB];
2383     AVPacket pkt1, *pkt = &pkt1;
2384     AVFormatParameters params, *ap = &params;
2385     int eof=0;
2386     int pkt_in_play_range = 0;
2387
2388     ic = avformat_alloc_context();
2389
2390     memset(st_index, -1, sizeof(st_index));
2391     is->video_stream = -1;
2392     is->audio_stream = -1;
2393     is->subtitle_stream = -1;
2394
2395     global_video_state = is;
2396     url_set_interrupt_cb(decode_interrupt_cb);
2397
2398     memset(ap, 0, sizeof(*ap));
2399
2400     ap->prealloced_context = 1;
2401     ap->width = frame_width;
2402     ap->height= frame_height;
2403     ap->time_base= (AVRational){1, 25};
2404     ap->pix_fmt = frame_pix_fmt;
2405
2406     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2407
2408     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2409     if (err < 0) {
2410         print_error(is->filename, err);
2411         ret = -1;
2412         goto fail;
2413     }
2414     is->ic = ic;
2415
2416     if(genpts)
2417         ic->flags |= AVFMT_FLAG_GENPTS;
2418
2419     err = av_find_stream_info(ic);
2420     if (err < 0) {
2421         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2422         ret = -1;
2423         goto fail;
2424     }
2425     if(ic->pb)
2426         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2427
2428     if(seek_by_bytes<0)
2429         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2430
2431     /* if seeking requested, we execute it */
2432     if (start_time != AV_NOPTS_VALUE) {
2433         int64_t timestamp;
2434
2435         timestamp = start_time;
2436         /* add the stream start time */
2437         if (ic->start_time != AV_NOPTS_VALUE)
2438             timestamp += ic->start_time;
2439         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2440         if (ret < 0) {
2441             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2442                     is->filename, (double)timestamp / AV_TIME_BASE);
2443         }
2444     }
2445
2446     for (i = 0; i < ic->nb_streams; i++)
2447         ic->streams[i]->discard = AVDISCARD_ALL;
2448     if (!video_disable)
2449         st_index[AVMEDIA_TYPE_VIDEO] =
2450             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2451                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2452     if (!audio_disable)
2453         st_index[AVMEDIA_TYPE_AUDIO] =
2454             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2455                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2456                                 st_index[AVMEDIA_TYPE_VIDEO],
2457                                 NULL, 0);
2458     if (!video_disable)
2459         st_index[AVMEDIA_TYPE_SUBTITLE] =
2460             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2461                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2462                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2463                                  st_index[AVMEDIA_TYPE_AUDIO] :
2464                                  st_index[AVMEDIA_TYPE_VIDEO]),
2465                                 NULL, 0);
2466     if (show_status) {
2467         av_dump_format(ic, 0, is->filename, 0);
2468     }
2469
2470     /* open the streams */
2471     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2472         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2473     }
2474
2475     ret=-1;
2476     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2477         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2478     }
2479     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2480     if(ret<0) {
2481         if (!display_disable)
2482             is->show_audio = 2;
2483     }
2484
2485     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2486         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2487     }
2488
2489     if (is->video_stream < 0 && is->audio_stream < 0) {
2490         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2491         ret = -1;
2492         goto fail;
2493     }
2494
2495     for(;;) {
2496         if (is->abort_request)
2497             break;
2498         if (is->paused != is->last_paused) {
2499             is->last_paused = is->paused;
2500             if (is->paused)
2501                 is->read_pause_return= av_read_pause(ic);
2502             else
2503                 av_read_play(ic);
2504         }
2505 #if CONFIG_RTSP_DEMUXER
2506         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2507             /* wait 10 ms to avoid trying to get another packet */
2508             /* XXX: horrible */
2509             SDL_Delay(10);
2510             continue;
2511         }
2512 #endif
2513         if (is->seek_req) {
2514             int64_t seek_target= is->seek_pos;
2515             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2516             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2517 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2518 //      of the seek_pos/seek_rel variables
2519
2520             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2521             if (ret < 0) {
2522                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2523             }else{
2524                 if (is->audio_stream >= 0) {
2525                     packet_queue_flush(&is->audioq);
2526                     packet_queue_put(&is->audioq, &flush_pkt);
2527                 }
2528                 if (is->subtitle_stream >= 0) {
2529                     packet_queue_flush(&is->subtitleq);
2530                     packet_queue_put(&is->subtitleq, &flush_pkt);
2531                 }
2532                 if (is->video_stream >= 0) {
2533                     packet_queue_flush(&is->videoq);
2534                     packet_queue_put(&is->videoq, &flush_pkt);
2535                 }
2536             }
2537             is->seek_req = 0;
2538             eof= 0;
2539         }
2540
2541         /* if the queues are full, no need to read more */
2542         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2543             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2544                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2545                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2546             /* wait 10 ms */
2547             SDL_Delay(10);
2548             continue;
2549         }
2550         if(eof) {
2551             if(is->video_stream >= 0){
2552                 av_init_packet(pkt);
2553                 pkt->data=NULL;
2554                 pkt->size=0;
2555                 pkt->stream_index= is->video_stream;
2556                 packet_queue_put(&is->videoq, pkt);
2557             }
2558             SDL_Delay(10);
2559             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2560                 if(loop!=1 && (!loop || --loop)){
2561                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2562                 }else if(autoexit){
2563                     ret=AVERROR_EOF;
2564                     goto fail;
2565                 }
2566             }
2567             continue;
2568         }
2569         ret = av_read_frame(ic, pkt);
2570         if (ret < 0) {
2571             if (ret == AVERROR_EOF || (ic->pb && ic->pb->eof_reached))
2572                 eof=1;
2573             if (ic->pb && ic->pb->error)
2574                 break;
2575             SDL_Delay(100); /* wait for user event */
2576             continue;
2577         }
2578         /* check if the packet is in the play range specified by the user; queue it if so, otherwise discard it */
2579         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2580                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2581                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2582                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2583                 <= ((double)duration/1000000);
2584         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2585             packet_queue_put(&is->audioq, pkt);
2586         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2587             packet_queue_put(&is->videoq, pkt);
2588         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2589             packet_queue_put(&is->subtitleq, pkt);
2590         } else {
2591             av_free_packet(pkt);
2592         }
2593     }
2594     /* wait until the end */
2595     while (!is->abort_request) {
2596         SDL_Delay(100);
2597     }
2598
2599     ret = 0;
2600  fail:
2601     /* disable interrupting */
2602     global_video_state = NULL;
2603
2604     /* close each stream */
2605     if (is->audio_stream >= 0)
2606         stream_component_close(is, is->audio_stream);
2607     if (is->video_stream >= 0)
2608         stream_component_close(is, is->video_stream);
2609     if (is->subtitle_stream >= 0)
2610         stream_component_close(is, is->subtitle_stream);
2611     if (is->ic) {
2612         av_close_input_file(is->ic);
2613         is->ic = NULL; /* safety */
2614     }
2615     url_set_interrupt_cb(NULL);
2616
2617     if (ret != 0) {
2618         SDL_Event event;
2619
2620         event.type = FF_QUIT_EVENT;
2621         event.user.data1 = is;
2622         SDL_PushEvent(&event);
2623     }
2624     return 0;
2625 }
2626
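/* allocate and initialize a VideoState for the given file, create the
   picture/subtitle queue locks and start the demuxing thread */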
2627 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2628 {
2629     VideoState *is;
2630
2631     is = av_mallocz(sizeof(VideoState));
2632     if (!is)
2633         return NULL;
2634     av_strlcpy(is->filename, filename, sizeof(is->filename));
2635     is->iformat = iformat;
2636     is->ytop = 0;
2637     is->xleft = 0;
2638
2639     /* start video display */
2640     is->pictq_mutex = SDL_CreateMutex();
2641     is->pictq_cond = SDL_CreateCond();
2642
2643     is->subpq_mutex = SDL_CreateMutex();
2644     is->subpq_cond = SDL_CreateCond();
2645
2646     is->av_sync_type = av_sync_type;
2647     is->parse_tid = SDL_CreateThread(decode_thread, is);
2648     if (!is->parse_tid) {
2649         av_free(is);
2650         return NULL;
2651     }
2652     return is;
2653 }
2654
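/* switch to the next usable stream of the given type (audio, video or subtitle) */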
2655 static void stream_cycle_channel(VideoState *is, int codec_type)
2656 {
2657     AVFormatContext *ic = is->ic;
2658     int start_index, stream_index;
2659     AVStream *st;
2660
2661     if (codec_type == AVMEDIA_TYPE_VIDEO)
2662         start_index = is->video_stream;
2663     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2664         start_index = is->audio_stream;
2665     else
2666         start_index = is->subtitle_stream;
2667     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2668         return;
2669     stream_index = start_index;
2670     for(;;) {
2671         if (++stream_index >= is->ic->nb_streams)
2672         {
2673             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2674             {
2675                 stream_index = -1;
2676                 goto the_end;
2677             } else
2678                 stream_index = 0;
2679         }
2680         if (stream_index == start_index)
2681             return;
2682         st = ic->streams[stream_index];
2683         if (st->codec->codec_type == codec_type) {
2684             /* check that parameters are OK */
2685             switch(codec_type) {
2686             case AVMEDIA_TYPE_AUDIO:
2687                 if (st->codec->sample_rate != 0 &&
2688                     st->codec->channels != 0)
2689                     goto the_end;
2690                 break;
2691             case AVMEDIA_TYPE_VIDEO:
2692             case AVMEDIA_TYPE_SUBTITLE:
2693                 goto the_end;
2694             default:
2695                 break;
2696             }
2697         }
2698     }
2699  the_end:
2700     stream_component_close(is, start_index);
2701     stream_component_open(is, stream_index);
2702 }
2703
2704
2705 static void toggle_full_screen(void)
2706 {
2707     is_full_screen = !is_full_screen;
2708     if (!fs_screen_width) {
2709         /* use default SDL method */
2710 //        SDL_WM_ToggleFullScreen(screen);
2711     }
2712     video_open(cur_stream);
2713 }
2714
2715 static void toggle_pause(void)
2716 {
2717     if (cur_stream)
2718         stream_pause(cur_stream);
2719     step = 0;
2720 }
2721
2722 static void step_to_next_frame(void)
2723 {
2724     if (cur_stream) {
2725         /* if the stream is paused, unpause it, then step */
2726         if (cur_stream->paused)
2727             stream_pause(cur_stream);
2728     }
2729     step = 1;
2730 }
2731
2732 static void toggle_audio_display(void)
2733 {
2734     if (cur_stream) {
2735         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2736         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2737         fill_rectangle(screen,
2738                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2739                     bgcolor);
2740         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2741     }
2742 }
2743
2744 /* handle an event sent by the GUI */
2745 static void event_loop(void)
2746 {
2747     SDL_Event event;
2748     double incr, pos, frac;
2749
2750     for(;;) {
2751         double x;
2752         SDL_WaitEvent(&event);
2753         switch(event.type) {
2754         case SDL_KEYDOWN:
2755             if (exit_on_keydown) {
2756                 do_exit();
2757                 break;
2758             }
2759             switch(event.key.keysym.sym) {
2760             case SDLK_ESCAPE:
2761             case SDLK_q:
2762                 do_exit();
2763                 break;
2764             case SDLK_f:
2765                 toggle_full_screen();
2766                 break;
2767             case SDLK_p:
2768             case SDLK_SPACE:
2769                 toggle_pause();
2770                 break;
2771             case SDLK_s: //S: Step to next frame
2772                 step_to_next_frame();
2773                 break;
2774             case SDLK_a:
2775                 if (cur_stream)
2776                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2777                 break;
2778             case SDLK_v:
2779                 if (cur_stream)
2780                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2781                 break;
2782             case SDLK_t:
2783                 if (cur_stream)
2784                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2785                 break;
2786             case SDLK_w:
2787                 toggle_audio_display();
2788                 break;
2789             case SDLK_LEFT:
2790                 incr = -10.0;
2791                 goto do_seek;
2792             case SDLK_RIGHT:
2793                 incr = 10.0;
2794                 goto do_seek;
2795             case SDLK_UP:
2796                 incr = 60.0;
2797                 goto do_seek;
2798             case SDLK_DOWN:
2799                 incr = -60.0;
2800             do_seek:
2801                 if (cur_stream) {
2802                     if (seek_by_bytes) {
2803                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2804                             pos= cur_stream->video_current_pos;
2805                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2806                             pos= cur_stream->audio_pkt.pos;
2807                         }else
2808                             pos = avio_tell(cur_stream->ic->pb);
2809                         if (cur_stream->ic->bit_rate)
2810                             incr *= cur_stream->ic->bit_rate / 8.0;
2811                         else
2812                             incr *= 180000.0;
2813                         pos += incr;
2814                         stream_seek(cur_stream, pos, incr, 1);
2815                     } else {
2816                         pos = get_master_clock(cur_stream);
2817                         pos += incr;
2818                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2819                     }
2820                 }
2821                 break;
2822             default:
2823                 break;
2824             }
2825             break;
2826         case SDL_MOUSEBUTTONDOWN:
2827             if (exit_on_mousedown) {
2828                 do_exit();
2829                 break;
2830             }
2831         case SDL_MOUSEMOTION:
2832             if(event.type ==SDL_MOUSEBUTTONDOWN){
2833                 x= event.button.x;
2834             }else{
2835                 if(event.motion.state != SDL_PRESSED)
2836                     break;
2837                 x= event.motion.x;
2838             }
2839             if (cur_stream) {
2840                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2841                     uint64_t size=  avio_size(cur_stream->ic->pb);
2842                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2843                 }else{
2844                     int64_t ts;
2845                     int ns, hh, mm, ss;
2846                     int tns, thh, tmm, tss;
2847                     tns = cur_stream->ic->duration/1000000LL;
2848                     thh = tns/3600;
2849                     tmm = (tns%3600)/60;
2850                     tss = (tns%60);
2851                     frac = x/cur_stream->width;
2852                     ns = frac*tns;
2853                     hh = ns/3600;
2854                     mm = (ns%3600)/60;
2855                     ss = (ns%60);
2856                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2857                             hh, mm, ss, thh, tmm, tss);
2858                     ts = frac*cur_stream->ic->duration;
2859                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2860                         ts += cur_stream->ic->start_time;
2861                     stream_seek(cur_stream, ts, 0, 0);
2862                 }
2863             }
2864             break;
2865         case SDL_VIDEORESIZE:
2866             if (cur_stream) {
2867                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2868                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2869                 screen_width = cur_stream->width = event.resize.w;
2870                 screen_height= cur_stream->height= event.resize.h;
2871             }
2872             break;
2873         case SDL_QUIT:
2874         case FF_QUIT_EVENT:
2875             do_exit();
2876             break;
2877         case FF_ALLOC_EVENT:
2878             video_open(event.user.data1);
2879             alloc_picture(event.user.data1);
2880             break;
2881         case FF_REFRESH_EVENT:
2882             video_refresh_timer(event.user.data1);
2883             cur_stream->refresh=0;
2884             break;
2885         default:
2886             break;
2887         }
2888     }
2889 }
2890
2891 static void opt_frame_size(const char *arg)
2892 {
2893     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2894         fprintf(stderr, "Incorrect frame size\n");
2895         exit(1);
2896     }
2897     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2898         fprintf(stderr, "Frame size must be a multiple of 2\n");
2899         exit(1);
2900     }
2901 }
2902
2903 static int opt_width(const char *opt, const char *arg)
2904 {
2905     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2906     return 0;
2907 }
2908
2909 static int opt_height(const char *opt, const char *arg)
2910 {
2911     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2912     return 0;
2913 }
2914
2915 static void opt_format(const char *arg)
2916 {
2917     file_iformat = av_find_input_format(arg);
2918     if (!file_iformat) {
2919         fprintf(stderr, "Unknown input format: %s\n", arg);
2920         exit(1);
2921     }
2922 }
2923
2924 static void opt_frame_pix_fmt(const char *arg)
2925 {
2926     frame_pix_fmt = av_get_pix_fmt(arg);
2927 }
2928
2929 static int opt_sync(const char *opt, const char *arg)
2930 {
2931     if (!strcmp(arg, "audio"))
2932         av_sync_type = AV_SYNC_AUDIO_MASTER;
2933     else if (!strcmp(arg, "video"))
2934         av_sync_type = AV_SYNC_VIDEO_MASTER;
2935     else if (!strcmp(arg, "ext"))
2936         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2937     else {
2938         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2939         exit(1);
2940     }
2941     return 0;
2942 }
2943
2944 static int opt_seek(const char *opt, const char *arg)
2945 {
2946     start_time = parse_time_or_die(opt, arg, 1);
2947     return 0;
2948 }
2949
2950 static int opt_duration(const char *opt, const char *arg)
2951 {
2952     duration = parse_time_or_die(opt, arg, 1);
2953     return 0;
2954 }
2955
2956 static int opt_debug(const char *opt, const char *arg)
2957 {
2958     av_log_set_level(99);
2959     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2960     return 0;
2961 }
2962
2963 static int opt_vismv(const char *opt, const char *arg)
2964 {
2965     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2966     return 0;
2967 }
2968
2969 static int opt_thread_count(const char *opt, const char *arg)
2970 {
2971     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2972 #if !HAVE_THREADS
2973     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2974 #endif
2975     return 0;
2976 }
2977
2978 static const OptionDef options[] = {
2979 #include "cmdutils_common_opts.h"
2980     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2981     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2982     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2983     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2984     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2985     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2986     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2987     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2988     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2989     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2990     { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2991     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2992     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2993     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2994     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2995     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2996     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2997     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2998     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2999     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3000     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3001     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3002     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3003     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3004     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3005     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3006     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3007     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3008     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3009     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3010     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3011     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3012     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3013     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3014     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3015     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3016     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3017 #if CONFIG_AVFILTER
3018     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3019 #endif
3020     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3021     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3022     { NULL, },
3023 };
3024
3025 static void show_usage(void)
3026 {
3027     printf("Simple media player\n");
3028     printf("usage: ffplay [options] input_file\n");
3029     printf("\n");
3030 }
3031
3032 static void show_help(void)
3033 {
3034     av_log_set_callback(log_callback_help);
3035     show_usage();
3036     show_help_options(options, "Main options:\n",
3037                       OPT_EXPERT, 0);
3038     show_help_options(options, "\nAdvanced options:\n",
3039                       OPT_EXPERT, OPT_EXPERT);
3040     printf("\n");
3041     av_opt_show2(avcodec_opts[0], NULL,
3042                  AV_OPT_FLAG_DECODING_PARAM, 0);
3043     printf("\n");
3044     av_opt_show2(avformat_opts, NULL,
3045                  AV_OPT_FLAG_DECODING_PARAM, 0);
3046 #if !CONFIG_AVFILTER
3047     printf("\n");
3048     av_opt_show2(sws_opts, NULL,
3049                  AV_OPT_FLAG_ENCODING_PARAM, 0);
3050 #endif
3051     printf("\nWhile playing:\n"
3052            "q, ESC              quit\n"
3053            "f                   toggle full screen\n"
3054            "p, SPC              pause\n"
3055            "a                   cycle audio channel\n"
3056            "v                   cycle video channel\n"
3057            "t                   cycle subtitle channel\n"
3058            "w                   show audio waves\n"
3059            "s                   activate frame-step mode\n"
3060            "left/right          seek backward/forward 10 seconds\n"
3061            "down/up             seek backward/forward 1 minute\n"
3062            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3063            );
3064 }
3065
3066 static void opt_input_file(const char *filename)
3067 {
3068     if (input_filename) {
3069         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3070                 filename, input_filename);
3071         exit(1);
3072     }
3073     if (!strcmp(filename, "-"))
3074         filename = "pipe:";
3075     input_filename = filename;
3076 }
3077
3078 /* program entry point */
3079 int main(int argc, char **argv)
3080 {
3081     int flags;
3082
3083     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3084
3085     /* register all codecs, demux and protocols */
3086     avcodec_register_all();
3087 #if CONFIG_AVDEVICE
3088     avdevice_register_all();
3089 #endif
3090 #if CONFIG_AVFILTER
3091     avfilter_register_all();
3092 #endif
3093     av_register_all();
3094
3095     init_opts();
3096
3097     show_banner();
3098
3099     parse_options(argc, argv, options, opt_input_file);
3100
3101     if (!input_filename) {
3102         show_usage();
3103         fprintf(stderr, "An input file must be specified\n");
3104         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3105         exit(1);
3106     }
3107
3108     if (display_disable) {
3109         video_disable = 1;
3110     }
3111     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3112 #if !defined(__MINGW32__) && !defined(__APPLE__)
3113     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3114 #endif
3115     if (SDL_Init (flags)) {
3116         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3117         exit(1);
3118     }
3119
3120     if (!display_disable) {
3121 #if HAVE_SDL_VIDEO_SIZE
3122         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3123         fs_screen_width = vi->current_w;
3124         fs_screen_height = vi->current_h;
3125 #endif
3126     }
3127
3128     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3129     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3130     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3131
3132     av_init_packet(&flush_pkt);
3133     flush_pkt.data= "FLUSH";
3134
3135     cur_stream = stream_open(input_filename, file_iformat);
3136
3137     event_loop();
3138
3139     /* never returns */
3140
3141     return 0;
3142 }