/*
 * various utility functions for use within FFmpeg
 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include "libavcodec/internal.h"
24 #include "libavutil/opt.h"
26 #include "libavutil/avstring.h"
28 #include "audiointerleave.h"
/**
 * @file
 * various utility functions for use within FFmpeg
 */
45 unsigned avformat_version(void)
47 return LIBAVFORMAT_VERSION_INT;
50 const char *avformat_configuration(void)
52 return FFMPEG_CONFIGURATION;
55 const char *avformat_license(void)
57 #define LICENSE_PREFIX "libavformat license: "
58 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
61 /* fraction handling */
64 * f = val + (num / den) + 0.5.
66 * 'num' is normalized so that it is such as 0 <= num < den.
68 * @param f fractional number
69 * @param val integer value
70 * @param num must be >= 0
71 * @param den must be >= 1
73 static void av_frac_init(AVFrac *f, int64_t val, int64_t num, int64_t den)
86 * Fractional addition to f: f = f + (incr / f->den).
88 * @param f fractional number
89 * @param incr increment, can be positive or negative
91 static void av_frac_add(AVFrac *f, int64_t incr)
104 } else if (num >= den) {
/* Registered (de)muxers are kept in singly linked lists; registration
 * appends, av_iformat_next()/av_oformat_next() iterate them. */
/** head of registered input format linked list */
AVInputFormat *first_iformat = NULL;
/** head of registered output format linked list */
AVOutputFormat *first_oformat = NULL;
116 AVInputFormat *av_iformat_next(AVInputFormat *f)
118 if(f) return f->next;
119 else return first_iformat;
122 AVOutputFormat *av_oformat_next(AVOutputFormat *f)
124 if(f) return f->next;
125 else return first_oformat;
128 void av_register_input_format(AVInputFormat *format)
132 while (*p != NULL) p = &(*p)->next;
137 void av_register_output_format(AVOutputFormat *format)
141 while (*p != NULL) p = &(*p)->next;
/**
 * Return 1 if the filename's extension (text after the last '.')
 * case-insensitively matches one of the comma-separated extensions,
 * 0 otherwise (including when filename is NULL or has no extension).
 * Extensions longer than 31 characters are truncated for comparison.
 */
int av_match_ext(const char *filename, const char *extensions)
{
    const char *ext, *p;
    char ext1[32], *q;

    if (!filename)
        return 0;

    ext = strrchr(filename, '.');
    if (ext) {
        ext++;
        p = extensions;
        for (;;) {
            /* copy next candidate up to ',' or end, bounded by ext1 */
            q = ext1;
            while (*p != '\0' && *p != ',' && q-ext1<sizeof(ext1)-1)
                *q++ = *p++;
            *q = '\0';
            if (!strcasecmp(ext1, ext))
                return 1;
            if (*p == '\0')
                break;
            p++;
        }
    }
    return 0;
}
/**
 * Return 1 if name case-insensitively matches one element of the
 * comma-separated list names, 0 otherwise. NULL arguments never match.
 */
static int match_format(const char *name, const char *names)
{
    const char *p;
    int len, namelen;

    if (!name || !names)
        return 0;

    namelen = strlen(name);
    while ((p = strchr(names, ','))) {
        /* compare over max(list-element length, namelen) so that a mere
         * prefix of either string does not count as a match */
        len = p - names;
        if (namelen > len)
            len = namelen;
        if (!strncasecmp(name, names, len))
            return 1;
        names = p + 1;
    }
    /* last (or only) element of the list */
    return !strcasecmp(name, names);
}
191 #if LIBAVFORMAT_VERSION_MAJOR < 53
192 AVOutputFormat *guess_format(const char *short_name, const char *filename,
193 const char *mime_type)
195 return av_guess_format(short_name, filename, mime_type);
199 AVOutputFormat *av_guess_format(const char *short_name, const char *filename,
200 const char *mime_type)
202 AVOutputFormat *fmt, *fmt_found;
203 int score_max, score;
205 /* specific test for image sequences */
206 #if CONFIG_IMAGE2_MUXER
207 if (!short_name && filename &&
208 av_filename_number_test(filename) &&
209 av_guess_image2_codec(filename) != CODEC_ID_NONE) {
210 return av_guess_format("image2", NULL, NULL);
213 /* Find the proper file type. */
217 while (fmt != NULL) {
219 if (fmt->name && short_name && !strcmp(fmt->name, short_name))
221 if (fmt->mime_type && mime_type && !strcmp(fmt->mime_type, mime_type))
223 if (filename && fmt->extensions &&
224 av_match_ext(filename, fmt->extensions)) {
227 if (score > score_max) {
236 #if LIBAVFORMAT_VERSION_MAJOR < 53
237 AVOutputFormat *guess_stream_format(const char *short_name, const char *filename,
238 const char *mime_type)
240 AVOutputFormat *fmt = av_guess_format(short_name, filename, mime_type);
243 AVOutputFormat *stream_fmt;
244 char stream_format_name[64];
246 snprintf(stream_format_name, sizeof(stream_format_name), "%s_stream", fmt->name);
247 stream_fmt = av_guess_format(stream_format_name, NULL, NULL);
257 enum CodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
258 const char *filename, const char *mime_type, enum AVMediaType type){
259 if(type == AVMEDIA_TYPE_VIDEO){
260 enum CodecID codec_id= CODEC_ID_NONE;
262 #if CONFIG_IMAGE2_MUXER
263 if(!strcmp(fmt->name, "image2") || !strcmp(fmt->name, "image2pipe")){
264 codec_id= av_guess_image2_codec(filename);
267 if(codec_id == CODEC_ID_NONE)
268 codec_id= fmt->video_codec;
270 }else if(type == AVMEDIA_TYPE_AUDIO)
271 return fmt->audio_codec;
273 return CODEC_ID_NONE;
276 AVInputFormat *av_find_input_format(const char *short_name)
279 for(fmt = first_iformat; fmt != NULL; fmt = fmt->next) {
280 if (match_format(short_name, fmt->name))
/* ABI-compat shims: re-export the libavcodec av_*_packet functions under
 * the LIBAVFORMAT_52 symbol version for old shared-library users.
 * NOTE(review): this copy is truncated — the shim bodies' braces and the
 * closing #endif are missing; restore from upstream before building. */
#if LIBAVFORMAT_VERSION_MAJOR < 53 && CONFIG_SHARED && HAVE_SYMVER
FF_SYMVER(void, av_destruct_packet_nofree, (AVPacket *pkt), "LIBAVFORMAT_52")
    av_destruct_packet_nofree(pkt);
FF_SYMVER(void, av_destruct_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    av_destruct_packet(pkt);
FF_SYMVER(int, av_new_packet, (AVPacket *pkt, int size), "LIBAVFORMAT_52")
    return av_new_packet(pkt, size);
FF_SYMVER(int, av_dup_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    return av_dup_packet(pkt);
FF_SYMVER(void, av_free_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
FF_SYMVER(void, av_init_packet, (AVPacket *pkt), "LIBAVFORMAT_52")
    /* warn once-per-call that the compat path is slower than a rebuild */
    av_log(NULL, AV_LOG_WARNING, "Diverting av_*_packet function calls to libavcodec. Recompile to improve performance\n");
319 int av_get_packet(ByteIOContext *s, AVPacket *pkt, int size)
321 int ret= av_new_packet(pkt, size);
326 pkt->pos= url_ftell(s);
328 ret= get_buffer(s, pkt->data, size);
332 av_shrink_packet(pkt, ret);
/**
 * Return 1 if the filename template contains a valid frame-number
 * pattern (e.g. "%d"), as required by image-sequence (de)muxers.
 */
int av_filename_number_test(const char *filename)
{
    char buf[1024];
    return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
}
344 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
346 AVInputFormat *fmt1, *fmt;
350 for(fmt1 = first_iformat; fmt1 != NULL; fmt1 = fmt1->next) {
351 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
354 if (fmt1->read_probe) {
355 score = fmt1->read_probe(pd);
356 } else if (fmt1->extensions) {
357 if (av_match_ext(pd->filename, fmt1->extensions)) {
361 if (score > *score_max) {
364 }else if (score == *score_max)
370 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
372 return av_probe_input_format2(pd, is_opened, &score);
375 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd, int score)
377 static const struct {
378 const char *name; enum CodecID id; enum AVMediaType type;
380 { "aac" , CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
381 { "ac3" , CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
382 { "dts" , CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
383 { "eac3" , CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
384 { "h264" , CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
385 { "m4v" , CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
386 { "mp3" , CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
387 { "mpegvideo", CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
390 AVInputFormat *fmt = av_probe_input_format2(pd, 1, &score);
394 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
395 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
396 for (i = 0; fmt_id_type[i].name; i++) {
397 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
398 st->codec->codec_id = fmt_id_type[i].id;
399 st->codec->codec_type = fmt_id_type[i].type;
407 /************************************************************/
408 /* input media file */
/**
 * Open a media file from an IO stream. 'fmt' must be specified.
 *
 * NOTE(review): this copy is heavily truncated — the declarations of
 * 'ic', 'err' and 'i', several closing braces, the error-handling jumps
 * and the success path are missing. Restore from upstream before
 * building; the comments below annotate only the visible fragments.
 */
int av_open_input_stream(AVFormatContext **ic_ptr,
                         ByteIOContext *pb, const char *filename,
                         AVInputFormat *fmt, AVFormatParameters *ap)
    AVFormatParameters default_ap;
    /* presumably ap falls back to a zeroed default_ap when NULL — confirm */
    memset(ap, 0, sizeof(default_ap));
    /* caller may pass a preallocated context via *ic_ptr */
    if(!ap->prealloced_context)
        ic = avformat_alloc_context();
        err = AVERROR(ENOMEM);
    ic->duration = AV_NOPTS_VALUE;
    ic->start_time = AV_NOPTS_VALUE;
    av_strlcpy(ic->filename, filename, sizeof(ic->filename));

    /* allocate private data */
    if (fmt->priv_data_size > 0) {
        ic->priv_data = av_mallocz(fmt->priv_data_size);
        if (!ic->priv_data) {
            err = AVERROR(ENOMEM);
        ic->priv_data = NULL;

    /* let the demuxer read the stream headers */
    if (ic->iformat->read_header) {
        err = ic->iformat->read_header(ic, ap);

    /* remember where the payload starts so seeks can return to it */
    if (pb && !ic->data_offset)
        ic->data_offset = url_ftell(ic->pb);

#if FF_API_OLD_METADATA
    ff_metadata_demux_compat(ic);

    ic->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

    /* error path: release everything allocated above */
    av_freep(&ic->priv_data);
    for(i=0;i<ic->nb_streams;i++) {
        AVStream *st = ic->streams[i];
        av_free(st->priv_data);
        av_free(st->codec->extradata);
/** size of probe buffer, for guessing file type from file contents */
#define PROBE_BUF_MIN 2048
#define PROBE_BUF_MAX (1<<20)

/**
 * Probe the bytes of *pb with growing buffer sizes (doubling from
 * PROBE_BUF_MIN up to max_probe_size) until a demuxer is detected,
 * then rewind the stream re-using the probe buffer.
 *
 * NOTE(review): this copy is truncated — several closing braces, the
 * 'continue'/'goto fail' statements and the final cleanup are missing;
 * restore from upstream before building.
 */
int ff_probe_input_buffer(ByteIOContext **pb, AVInputFormat **fmt,
                          const char *filename, void *logctx,
                          unsigned int offset, unsigned int max_probe_size)
    AVProbeData pd = { filename ? filename : "", NULL, -offset };
    unsigned char *buf = NULL;
    int ret = 0, probe_size;

    /* clamp max_probe_size into [PROBE_BUF_MIN, PROBE_BUF_MAX] */
    if (!max_probe_size) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size > PROBE_BUF_MAX) {
        max_probe_size = PROBE_BUF_MAX;
    } else if (max_probe_size < PROBE_BUF_MIN) {
        return AVERROR(EINVAL);

    if (offset >= max_probe_size) {
        return AVERROR(EINVAL);

    for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt && ret >= 0;
        probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
        /* NOTE(review): this inner 'ret' shadows the loop-condition 'ret'
         * declared above — historical wart, confirm before changing */
        int ret, score = probe_size < max_probe_size ? AVPROBE_SCORE_MAX/4 : 0;
        int buf_offset = (probe_size == PROBE_BUF_MIN) ? 0 : probe_size>>1;

        if (probe_size < offset) {

        /* read probe data */
        buf = av_realloc(buf, probe_size + AVPROBE_PADDING_SIZE);
        if ((ret = get_buffer(*pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
            /* fail if error was not end of file, otherwise, lower score */
            if (ret != AVERROR_EOF) {
            ret = 0;            /* error was end of file, nothing read */

        pd.buf = &buf[offset];
        /* demuxer probes may read a few bytes past the end; zero the pad */
        memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);

        /* guess file format */
        *fmt = av_probe_input_format2(&pd, 1, &score);
        if(score <= AVPROBE_SCORE_MAX/4){ //this can only be true in the last iteration
            av_log(logctx, AV_LOG_WARNING, "Format detected only with low score of %d, misdetection possible!\n", score);
            av_log(logctx, AV_LOG_DEBUG, "Probed with size=%d and score=%d\n", probe_size, score);

    return AVERROR_INVALIDDATA;

    /* rewind. reuse probe buffer to avoid seeking */
    if ((ret = ff_rewind_with_probe_data(*pb, buf, pd.buf_size)) < 0)
/**
 * Open an input media file: probe the format, open the IO context and
 * hand off to av_open_input_stream().
 *
 * NOTE(review): this copy is truncated — the signature is missing its
 * 'AVInputFormat *fmt' and 'int buf_size' parameters, and the local
 * declarations ('err', 'fmt'), braces and fail paths are absent.
 * Restore from upstream before building.
 */
int av_open_input_file(AVFormatContext **ic_ptr, const char *filename,
                       AVFormatParameters *ap)
    AVProbeData probe_data, *pd = &probe_data;
    ByteIOContext *pb = NULL;
    /* a preallocated context doubles as the logging context */
    void *logctx= ap && ap->prealloced_context ? *ic_ptr : NULL;

    pd->filename = filename;

    /* guess format if no file can be opened */
    fmt = av_probe_input_format(pd, 0);

    /* Do not open file if the format does not need it. XXX: specific
       hack needed to handle RTSP/TCP */
    if (!fmt || !(fmt->flags & AVFMT_NOFILE)) {
        /* if no file needed do not try to open one */
        if ((err=url_fopen(&pb, filename, URL_RDONLY)) < 0) {
        url_setbufsize(pb, buf_size);

    /* content-based probing when the extension/name probe failed */
    if (!fmt && (err = ff_probe_input_buffer(&pb, &fmt, filename, logctx, 0, logctx ? (*ic_ptr)->probesize : 0)) < 0) {

    /* if still no format found, error */
        err = AVERROR_INVALIDDATA;

    /* check filename in case an image number is expected */
    if (fmt->flags & AVFMT_NEEDNUMBER) {
        if (!av_filename_number_test(filename)) {
            err = AVERROR_NUMEXPECTED;

    err = av_open_input_stream(ic_ptr, pb, filename, fmt, ap);

    if (ap && ap->prealloced_context)
622 /*******************************************************/
624 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
625 AVPacketList **plast_pktl){
626 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
631 (*plast_pktl)->next = pktl;
633 *packet_buffer = pktl;
635 /* add the packet in the buffered packet list */
/**
 * Read one raw packet from the demuxer, serving buffered packets first
 * and feeding data of CODEC_ID_PROBE streams into the stream prober.
 *
 * NOTE(review): this copy is truncated — the outer loop, declarations
 * of 'ret'/'i'/'st', returns, 'break's in the switch, and many closing
 * braces are missing. Restore from upstream before building.
 */
int av_read_packet(AVFormatContext *s, AVPacket *pkt)
        AVPacketList *pktl = s->raw_packet_buffer;

            /* serve a buffered packet once its stream no longer needs
             * probing or the probe buffer budget is exhausted */
            if(s->streams[pkt->stream_index]->codec->codec_id != CODEC_ID_PROBE ||
               !s->streams[pkt->stream_index]->probe_packets ||
               s->raw_packet_buffer_remaining_size < pkt->size){
                AVProbeData *pd = &s->streams[pkt->stream_index]->probe_data;
                s->raw_packet_buffer = pktl->next;
                s->raw_packet_buffer_remaining_size += pkt->size;

        ret= s->iformat->read_packet(s, pkt);
            if (!pktl || ret == AVERROR(EAGAIN))
            /* real read failure: stop probing on all streams */
            for (i = 0; i < s->nb_streams; i++)
                s->streams[i]->probe_packets = 0;

        st= s->streams[pkt->stream_index];

        /* apply forced codec ids requested by the user */
        switch(st->codec->codec_type){
        case AVMEDIA_TYPE_VIDEO:
            if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
        case AVMEDIA_TYPE_AUDIO:
            if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
        case AVMEDIA_TYPE_SUBTITLE:
            if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;

        if(!pktl && (st->codec->codec_id != CODEC_ID_PROBE ||

        /* queue the packet and charge it against the probe budget */
        add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
        s->raw_packet_buffer_remaining_size -= pkt->size;

        if(st->codec->codec_id == CODEC_ID_PROBE){
            AVProbeData *pd = &st->probe_data;
            av_log(s, AV_LOG_DEBUG, "probing stream %d\n", st->index);

            pd->buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
            memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
            pd->buf_size += pkt->size;
            memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);

            /* only re-probe when the buffer size crossed a power of two */
            if(av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
                //FIXME we dont reduce score to 0 for the case of running out of buffer space in bytes
                set_codec_from_probe_data(s, st, pd, st->probe_packets > 0 ? AVPROBE_SCORE_MAX/4 : 0);
                if(st->codec->codec_id != CODEC_ID_PROBE){
                    av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
717 /**********************************************************/
720 * Get the number of samples of an audio frame. Return -1 on error.
722 static int get_audio_frame_size(AVCodecContext *enc, int size)
726 if(enc->codec_id == CODEC_ID_VORBIS)
729 if (enc->frame_size <= 1) {
730 int bits_per_sample = av_get_bits_per_sample(enc->codec_id);
732 if (bits_per_sample) {
733 if (enc->channels == 0)
735 frame_size = (size << 3) / (bits_per_sample * enc->channels);
737 /* used for example by ADPCM codecs */
738 if (enc->bit_rate == 0)
740 frame_size = ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
743 frame_size = enc->frame_size;
/**
 * Compute the frame duration as a rational *pnum / *pden (in units of
 * the stream time base); leaves it at 0 when unavailable.
 * (The historical "in seconds" wording was misleading — the function
 * returns void and writes a fraction.)
 *
 * NOTE(review): this copy is truncated — initialization of *pnum/*pden,
 * several closing braces, 'break's and the default case are missing.
 * Restore from upstream before building.
 */
static void compute_frame_duration(int *pnum, int *pden, AVStream *st,
                                   AVCodecParserContext *pc, AVPacket *pkt)
    switch(st->codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        /* prefer the container time base when it is plausible (<1000 fps) */
        if(st->time_base.num*1000LL > st->time_base.den){
            *pnum = st->time_base.num;
            *pden = st->time_base.den;
        }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
            *pnum = st->codec->time_base.num;
            *pden = st->codec->time_base.den;
            if (pc && pc->repeat_pict) {
                *pnum = (*pnum) * (1 + pc->repeat_pict);
            //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
            //Thus if we have no parser in such case leave duration undefined.
            if(st->codec->ticks_per_frame>1 && !pc){
    case AVMEDIA_TYPE_AUDIO:
        frame_size = get_audio_frame_size(st->codec, pkt->size);
        *pden = st->codec->sample_rate;
/* Return 1 when every frame of the codec is a keyframe (all audio, and
 * intra-only video codecs), so AV_PKT_FLAG_KEY can be set uniformly.
 * NOTE(review): this copy is truncated — the return statements, several
 * case labels and closing braces are missing; restore from upstream. */
static int is_intra_only(AVCodecContext *enc){
    if(enc->codec_type == AVMEDIA_TYPE_AUDIO){
    }else if(enc->codec_type == AVMEDIA_TYPE_VIDEO){
        switch(enc->codec_id){
        case CODEC_ID_MJPEGB:
        case CODEC_ID_RAWVIDEO:
        case CODEC_ID_DVVIDEO:
        case CODEC_ID_HUFFYUV:
        case CODEC_ID_FFVHUFF:
        case CODEC_ID_JPEG2000:
/* Once the first DTS of a stream becomes known, shift the timestamps of
 * all packets already buffered for that stream by st->first_dts, and
 * derive st->start_time.
 * NOTE(review): this copy is truncated — an early 'return', a 'continue'
 * in the loop and closing braces are missing; restore from upstream. */
static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                      int64_t dts, int64_t pts)
    AVStream *st= s->streams[stream_index];
    AVPacketList *pktl= s->packet_buffer;

    /* only act the first time a usable dts shows up */
    if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)

    st->first_dts= dts - st->cur_dts;

    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != stream_index)
        //FIXME think more about this check
        if(pktl->pkt.pts != AV_NOPTS_VALUE && pktl->pkt.pts == pktl->pkt.dts)
            pktl->pkt.pts += st->first_dts;

        if(pktl->pkt.dts != AV_NOPTS_VALUE)
            pktl->pkt.dts += st->first_dts;

        if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
            st->start_time= pktl->pkt.pts;

    if (st->start_time == AV_NOPTS_VALUE)
        st->start_time = pts;
/* Backfill dts/pts/duration of already-buffered packets of pkt's stream
 * using pkt->duration, once a reference point (first_dts) exists.
 * NOTE(review): this copy is truncated — declaration of 'cur_dts', an
 * early 'return', 'break'/'continue' statements and closing braces are
 * missing; restore from upstream before building. */
static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
    AVPacketList *pktl= s->packet_buffer;

    if(st->first_dts != AV_NOPTS_VALUE){
        cur_dts= st->first_dts;
        /* walk back from first_dts over the leading no-timestamp packets */
        for(; pktl; pktl= pktl->next){
            if(pktl->pkt.stream_index == pkt->stream_index){
                if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                cur_dts -= pkt->duration;
        pktl= s->packet_buffer;
        st->first_dts = cur_dts;
    }else if(st->cur_dts)

    /* assign extrapolated timestamps to the untimed buffered packets */
    for(; pktl; pktl= pktl->next){
        if(pktl->pkt.stream_index != pkt->stream_index)
        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
           && !pktl->pkt.duration){
            pktl->pkt.dts= cur_dts;
            if(!st->codec->has_b_frames)
                pktl->pkt.pts= cur_dts;
            cur_dts += pkt->duration;
            pktl->pkt.duration= pkt->duration;

    if(st->first_dts == AV_NOPTS_VALUE)
        st->cur_dts= cur_dts;
/* Fill in missing pts/dts/duration/key-frame fields of a demuxed packet
 * using the parser state, the stream time base and heuristics about
 * B-frame presentation delay.
 * NOTE(review): this copy is truncated — early returns, the 'offset'
 * declaration, several condition lines and closing braces are missing;
 * restore from upstream before building. The comments below annotate
 * only the visible fragments. */
static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
                               AVCodecParserContext *pc, AVPacket *pkt)
    int num, den, presentation_delayed, delay, i;

    if (s->flags & AVFMT_FLAG_NOFILLIN)

    if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
        pkt->dts= AV_NOPTS_VALUE;

    if (st->codec->codec_id != CODEC_ID_H264 && pc && pc->pict_type == FF_B_TYPE)
        //FIXME Set low_delay = 0 when has_b_frames = 1
        st->codec->has_b_frames = 1;

    /* do we have a video B-frame ? */
    delay= st->codec->has_b_frames;
    presentation_delayed = 0;
    /* XXX: need has_b_frame, but cannot get it if the codec is
        pc && pc->pict_type != FF_B_TYPE)
        presentation_delayed = 1;

    /* undo a timestamp wrap when dts overshoots pts */
    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && pkt->dts > pkt->pts && st->pts_wrap_bits<63
       /*&& pkt->dts-(1LL<<st->pts_wrap_bits) < pkt->pts*/){
        pkt->dts -= 1LL<<st->pts_wrap_bits;

    // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
    // we take the conservative approach and discard both
    // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
    if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
        av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination\n");
        pkt->dts= pkt->pts= AV_NOPTS_VALUE;

    if (pkt->duration == 0) {
        compute_frame_duration(&num, &den, st, pc, pkt);
            pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
            if(pkt->duration != 0 && s->packet_buffer)
                update_initial_durations(s, st, pkt);

    /* correct timestamps with byte offset if demuxers only have timestamps
       on packet boundaries */
    if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
        /* this will estimate bitrate based on this frame's duration and size */
        offset = av_rescale(pc->offset, pkt->duration, pkt->size);
        if(pkt->pts != AV_NOPTS_VALUE)
        if(pkt->dts != AV_NOPTS_VALUE)

    if (pc && pc->dts_sync_point >= 0) {
        // we have synchronization info from the parser
        int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
        int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
        if (pkt->dts != AV_NOPTS_VALUE) {
            // got DTS from the stream, update reference timestamp
            st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
            pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
        } else if (st->reference_dts != AV_NOPTS_VALUE) {
            // compute DTS based on reference timestamp
            pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
            pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
        if (pc->dts_sync_point > 0)
            st->reference_dts = pkt->dts; // new reference

    /* This may be redundant, but it should not hurt. */
    if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
        presentation_delayed = 1;

//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64" st:%d pc:%p\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts, pkt->stream_index, pc);
    /* interpolate PTS and DTS if they are not present */
    //We skip H264 currently because delay and has_b_frames are not reliably set
    if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != CODEC_ID_H264){
        if (presentation_delayed) {
            /* DTS = decompression timestamp */
            /* PTS = presentation timestamp */
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->last_IP_pts;
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
            if (pkt->dts == AV_NOPTS_VALUE)
                pkt->dts = st->cur_dts;

            /* this is tricky: the dts must be incremented by the duration
               of the frame we are displaying, i.e. the last I- or P-frame */
            if (st->last_IP_duration == 0)
                st->last_IP_duration = pkt->duration;
            if(pkt->dts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->dts + st->last_IP_duration;
            st->last_IP_duration = pkt->duration;
            st->last_IP_pts= pkt->pts;
            /* cannot compute PTS if not present (we can compute it only
               by knowing the future */
        } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
            if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
                int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
                int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
                if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
                    pkt->pts += pkt->duration;
//                    av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);

            /* presentation is not delayed : PTS and DTS are the same */
            if(pkt->pts == AV_NOPTS_VALUE)
            update_initial_timestamps(s, pkt->stream_index, pkt->pts, pkt->pts);
            if(pkt->pts == AV_NOPTS_VALUE)
                pkt->pts = st->cur_dts;

            if(pkt->pts != AV_NOPTS_VALUE)
                st->cur_dts = pkt->pts + pkt->duration;

    /* insertion-sort pts into the reorder buffer to reconstruct dts */
    if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
        st->pts_buffer[0]= pkt->pts;
        for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
            FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
        if(pkt->dts == AV_NOPTS_VALUE)
            pkt->dts= st->pts_buffer[0];
        if(st->codec->codec_id == CODEC_ID_H264){ //we skiped it above so we try here
            update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts); // this should happen on the first packet
        if(pkt->dts > st->cur_dts)
            st->cur_dts = pkt->dts;

//    av_log(NULL, AV_LOG_ERROR, "OUTdelayed:%d/%d pts:%"PRId64", dts:%"PRId64" cur_dts:%"PRId64"\n", presentation_delayed, delay, pkt->pts, pkt->dts, st->cur_dts);

    if(is_intra_only(st->codec))
        pkt->flags |= AV_PKT_FLAG_KEY;

    /* keyframe computation */
    if (pc->key_frame == 1)
        pkt->flags |= AV_PKT_FLAG_KEY;
    else if (pc->key_frame == -1 && pc->pict_type == FF_I_TYPE)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->convergence_duration = pc->convergence_duration;
/* Read the next frame: pull raw packets via av_read_packet(), run them
 * through the per-stream parser when needed, fill in timestamp fields
 * and maintain the generic index.
 * NOTE(review): this copy is heavily truncated — the outer for(;;)
 * loop, declarations ('ret', 'i', 'st', 'len', 'cur_pkt'), many returns
 * and closing braces are missing. Restore from upstream before
 * building; comments below annotate only the visible fragments. */
static int av_read_frame_internal(AVFormatContext *s, AVPacket *pkt)
    av_init_packet(pkt);

    /* select current input stream component */
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            /* raw data support */
            *pkt = st->cur_pkt; st->cur_pkt.data= NULL;
            compute_pkt_fields(s, st, NULL, pkt);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
        } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
            len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
                                   st->cur_ptr, st->cur_len,
                                   st->cur_pkt.pts, st->cur_pkt.dts,
            /* timestamps were consumed by the parser; do not reuse them */
            st->cur_pkt.pts = AV_NOPTS_VALUE;
            st->cur_pkt.dts = AV_NOPTS_VALUE;
            /* increment read pointer */

            /* return packet if any */
                pkt->stream_index = st->index;
                pkt->pts = st->parser->pts;
                pkt->dts = st->parser->dts;
                pkt->pos = st->parser->pos;
                if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
                    /* parser passed the packet through: steal ownership */
                    pkt->destruct= st->cur_pkt.destruct;
                    st->cur_pkt.destruct= NULL;
                    st->cur_pkt.data = NULL;
                    assert(st->cur_len == 0);
                    pkt->destruct = NULL;
                compute_pkt_fields(s, st, st->parser, pkt);

                if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
                    ff_reduce_index(s, st->index);
                    av_add_index_entry(st, st->parser->frame_offset, pkt->dts,
                                       0, 0, AVINDEX_KEYFRAME);

            av_free_packet(&st->cur_pkt);

        /* read next packet */
        ret = av_read_packet(s, &cur_pkt);
            if (ret == AVERROR(EAGAIN))
            /* return the last frames, if any */
            for(i = 0; i < s->nb_streams; i++) {
                if (st->parser && st->need_parsing) {
                    av_parser_parse2(st->parser, st->codec,
                                     &pkt->data, &pkt->size,
                                     AV_NOPTS_VALUE, AV_NOPTS_VALUE,
            /* no more packets: really terminate parsing */

        st = s->streams[cur_pkt.stream_index];
        st->cur_pkt= cur_pkt;

        if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
           st->cur_pkt.dts != AV_NOPTS_VALUE &&
           st->cur_pkt.pts < st->cur_pkt.dts){
            av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
                   st->cur_pkt.stream_index,
//            av_free_packet(&st->cur_pkt);

        if(s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
                   st->cur_pkt.stream_index,
                   st->cur_pkt.duration,

        st->cur_ptr = st->cur_pkt.data;
        st->cur_len = st->cur_pkt.size;
        /* lazily create the parser on first packet of the stream */
        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codec->codec_id);
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
                st->parser->flags |= PARSER_FLAG_ONCE;

    if(s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
/* Public read API: return the next frame, optionally generating missing
 * pts values (AVFMT_FLAG_GENPTS) by buffering packets and borrowing the
 * dts of a later non-B-frame packet of the same stream.
 * NOTE(review): this copy is truncated — declaration of 'pktl', loop
 * headers, the dequeue/return path and closing braces are missing;
 * restore from upstream before building. */
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
    const int genpts= s->flags & AVFMT_FLAG_GENPTS;

    pktl = s->packet_buffer;
        AVPacket *next_pkt= &pktl->pkt;

        if(genpts && next_pkt->dts != AV_NOPTS_VALUE){
            int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
            /* look ahead for a packet whose dts can serve as this pts */
            while(pktl && next_pkt->pts == AV_NOPTS_VALUE){
                if( pktl->pkt.stream_index == next_pkt->stream_index
                    && (0 > av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)))
                    && av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
                    next_pkt->pts= pktl->pkt.dts;
            pktl = s->packet_buffer;

        if( next_pkt->pts != AV_NOPTS_VALUE
            || next_pkt->dts == AV_NOPTS_VALUE
        /* read packet from packet buffer, if there is data */
            s->packet_buffer = pktl->next;

        int ret= av_read_frame_internal(s, pkt);
        if(pktl && ret != AVERROR(EAGAIN)){

        /* buffer the packet so pts generation can look ahead */
        if(av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
                                       &s->packet_buffer_end)) < 0)
            return AVERROR(ENOMEM);
        /* without GENPTS no buffering is needed */
        assert(!s->packet_buffer);
        return av_read_frame_internal(s, pkt);
1231 /* XXX: suppress the packet queue */
1232 static void flush_packet_queue(AVFormatContext *s)
1237 pktl = s->packet_buffer;
1240 s->packet_buffer = pktl->next;
1241 av_free_packet(&pktl->pkt);
1244 while(s->raw_packet_buffer){
1245 pktl = s->raw_packet_buffer;
1246 s->raw_packet_buffer = pktl->next;
1247 av_free_packet(&pktl->pkt);
1250 s->packet_buffer_end=
1251 s->raw_packet_buffer_end= NULL;
1252 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1255 /*******************************************************/
1258 int av_find_default_stream_index(AVFormatContext *s)
1260 int first_audio_index = -1;
1264 if (s->nb_streams <= 0)
1266 for(i = 0; i < s->nb_streams; i++) {
1268 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
1271 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1272 first_audio_index = i;
1274 return first_audio_index >= 0 ? first_audio_index : 0;
1278 * Flush the frame reader.
1280 void ff_read_frame_flush(AVFormatContext *s)
1285 flush_packet_queue(s);
1289 /* for each stream, reset read state */
1290 for(i = 0; i < s->nb_streams; i++) {
1294 av_parser_close(st->parser);
1296 av_free_packet(&st->cur_pkt);
1298 st->last_IP_pts = AV_NOPTS_VALUE;
1299 st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1300 st->reference_dts = AV_NOPTS_VALUE;
1305 st->probe_packets = MAX_PROBE_PACKETS;
1307 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1308 st->pts_buffer[j]= AV_NOPTS_VALUE;
1312 void av_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp){
1315 for(i = 0; i < s->nb_streams; i++) {
1316 AVStream *st = s->streams[i];
1318 st->cur_dts = av_rescale(timestamp,
1319 st->time_base.den * (int64_t)ref_st->time_base.num,
1320 st->time_base.num * (int64_t)ref_st->time_base.den);
1324 void ff_reduce_index(AVFormatContext *s, int stream_index)
1326 AVStream *st= s->streams[stream_index];
1327 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1329 if((unsigned)st->nb_index_entries >= max_entries){
1331 for(i=0; 2*i<st->nb_index_entries; i++)
1332 st->index_entries[i]= st->index_entries[2*i];
1333 st->nb_index_entries= i;
1337 int av_add_index_entry(AVStream *st,
1338 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1340 AVIndexEntry *entries, *ie;
1343 if((unsigned)st->nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1346 entries = av_fast_realloc(st->index_entries,
1347 &st->index_entries_allocated_size,
1348 (st->nb_index_entries + 1) *
1349 sizeof(AVIndexEntry));
1353 st->index_entries= entries;
1355 index= av_index_search_timestamp(st, timestamp, AVSEEK_FLAG_ANY);
1358 index= st->nb_index_entries++;
1359 ie= &entries[index];
1360 assert(index==0 || ie[-1].timestamp < timestamp);
1362 ie= &entries[index];
1363 if(ie->timestamp != timestamp){
1364 if(ie->timestamp <= timestamp)
1366 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(st->nb_index_entries - index));
1367 st->nb_index_entries++;
1368 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1369 distance= ie->min_distance;
1373 ie->timestamp = timestamp;
1374 ie->min_distance= distance;
/**
 * Binary-search the stream's index for wanted_timestamp.
 * With AVSEEK_FLAG_BACKWARD the entry at or before the timestamp is
 * chosen, otherwise the entry at or after it. Unless AVSEEK_FLAG_ANY is
 * set, the result is then walked to the nearest keyframe entry.
 * NOTE(review): the search-loop scaffolding (a/b/m setup) is missing
 * from this excerpt.
 */
1381 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1384 AVIndexEntry *entries= st->index_entries;
1385 int nb_entries= st->nb_index_entries;
1392 //optimize appending index entries at the end
1393 if(b && entries[b-1].timestamp < wanted_timestamp)
1398 timestamp = entries[m].timestamp;
1399 if(timestamp >= wanted_timestamp)
1401 if(timestamp <= wanted_timestamp)
/* pick the lower (backward) or upper (forward) bound of the search */
1404 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1406 if(!(flags & AVSEEK_FLAG_ANY)){
/* step toward a keyframe entry in the requested direction */
1407 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1408 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/**
 * Seek to target_ts using binary search over the demuxer's
 * read_timestamp() callback, seeding the search bounds from the stream's
 * seek index when entries are available, then repositioning the ByteIO
 * stream and updating every stream's cur_dts.
 * NOTE(review): interior lines (variable declarations, some braces and
 * returns) are missing from this excerpt.
 */
1419 int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags){
1420 AVInputFormat *avif= s->iformat;
1421 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1422 int64_t ts_min, ts_max, ts;
1427 if (stream_index < 0)
1431 av_log(s, AV_LOG_DEBUG, "read_seek: %d %"PRId64"\n", stream_index, target_ts);
1435 ts_min= AV_NOPTS_VALUE;
1436 pos_limit= -1; //gcc falsely says it may be uninitialized
1438 st= s->streams[stream_index];
/* seed the lower bound from the cached index entry at or before target_ts */
1439 if(st->index_entries){
1442 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1443 index= FFMAX(index, 0);
1444 e= &st->index_entries[index];
1446 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1448 ts_min= e->timestamp;
1450 av_log(s, AV_LOG_DEBUG, "using cached pos_min=0x%"PRIx64" dts_min=%"PRId64"\n",
/* seed the upper bound from the entry at or after target_ts */
1457 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1458 assert(index < st->nb_index_entries);
1460 e= &st->index_entries[index];
1461 assert(e->timestamp >= target_ts);
1463 ts_max= e->timestamp;
1464 pos_limit= pos_max - e->min_distance;
1466 av_log(s, AV_LOG_DEBUG, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%"PRId64"\n",
1467 pos_max,pos_limit, ts_max);
1472 pos= av_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1477 if ((ret = url_fseek(s->pb, pos, SEEK_SET)) < 0)
1480 av_update_cur_dts(s, st, ts);
/**
 * Generic timestamp-based search: narrow [pos_min, pos_max] around
 * target_ts using the demuxer's read_timestamp() callback, combining
 * linear interpolation, bisection and linear scan. The found timestamp
 * is returned through *ts_ret.
 * NOTE(review): interior lines (loop scaffolding, some assignments and
 * the final return) are missing from this excerpt.
 */
1485 int64_t av_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts, int64_t pos_min, int64_t pos_max, int64_t pos_limit, int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret, int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t )){
1487 int64_t start_pos, filesize;
1491 av_log(s, AV_LOG_DEBUG, "gen_seek: %d %"PRId64"\n", stream_index, target_ts);
/* establish a lower bound if the caller did not supply one */
1494 if(ts_min == AV_NOPTS_VALUE){
1495 pos_min = s->data_offset;
1496 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1497 if (ts_min == AV_NOPTS_VALUE)
/* establish an upper bound by probing backwards from the end of file */
1501 if(ts_max == AV_NOPTS_VALUE){
1503 filesize = url_fsize(s->pb);
1504 pos_max = filesize - 1;
1507 ts_max = read_timestamp(s, stream_index, &pos_max, pos_max + step);
1509 }while(ts_max == AV_NOPTS_VALUE && pos_max >= step);
1510 if (ts_max == AV_NOPTS_VALUE)
/* extend ts_max to the last timestamp actually present in the file */
1514 int64_t tmp_pos= pos_max + 1;
1515 int64_t tmp_ts= read_timestamp(s, stream_index, &tmp_pos, INT64_MAX);
1516 if(tmp_ts == AV_NOPTS_VALUE)
1520 if(tmp_pos >= filesize)
1526 if(ts_min > ts_max){
1528 }else if(ts_min == ts_max){
/* main refinement loop: shrink the byte range around target_ts */
1533 while (pos_min < pos_limit) {
1535 av_log(s, AV_LOG_DEBUG, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%"PRId64" dts_max=%"PRId64"\n",
1539 assert(pos_limit <= pos_max);
1542 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1543 // interpolate position (better than dichotomy)
1544 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1545 + pos_min - approximate_keyframe_distance;
1546 }else if(no_change==1){
1547 // bisection, if interpolation failed to change min or max pos last time
1548 pos = (pos_min + pos_limit)>>1;
1550 /* linear search if bisection failed, can only happen if there
1551 are very few or no keyframes between min/max */
1556 else if(pos > pos_limit)
1560 ts = read_timestamp(s, stream_index, &pos, INT64_MAX); //may pass pos_limit instead of -1
1566 av_log(s, AV_LOG_DEBUG, "%"PRId64" %"PRId64" %"PRId64" / %"PRId64" %"PRId64" %"PRId64" target:%"PRId64" limit:%"PRId64" start:%"PRId64" noc:%d\n",
1567 pos_min, pos, pos_max, ts_min, ts, ts_max, target_ts, pos_limit,
1568 start_pos, no_change);
1570 if(ts == AV_NOPTS_VALUE){
1571 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1574 assert(ts != AV_NOPTS_VALUE);
1575 if (target_ts <= ts) {
1576 pos_limit = start_pos - 1;
1580 if (target_ts >= ts) {
/* pick the side of the bracket that the seek direction asks for */
1586 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1587 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1590 ts_min = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1592 ts_max = read_timestamp(s, stream_index, &pos_min, INT64_MAX);
1593 av_log(s, AV_LOG_DEBUG, "pos=0x%"PRIx64" %"PRId64"<=%"PRId64"<=%"PRId64"\n",
1594 pos, ts_min, target_ts, ts_max);
/**
 * Seek by byte position: clamp pos into [data_offset, filesize-1],
 * reposition the ByteIO stream there and update cur_dts.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1600 static int av_seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1601 int64_t pos_min, pos_max;
1605 if (stream_index < 0)
1608 st= s->streams[stream_index];
1611 pos_min = s->data_offset;
1612 pos_max = url_fsize(s->pb) - 1;
/* clamp the requested byte position to the payload range */
1614 if (pos < pos_min) pos= pos_min;
1615 else if(pos > pos_max) pos= pos_max;
1617 url_fseek(s->pb, pos, SEEK_SET);
1620 av_update_cur_dts(s, st, ts);
/**
 * Generic seek via the stream's index: if no usable entry exists yet,
 * read frames forward from the end of the current index (building it up)
 * until the target timestamp is covered, then jump to the chosen entry.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1625 static int av_seek_frame_generic(AVFormatContext *s,
1626 int stream_index, int64_t timestamp, int flags)
1633 st = s->streams[stream_index];
1635 index = av_index_search_timestamp(st, timestamp, flags);
1637 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
/* target beyond the known index: read packets forward to extend it */
1640 if(index < 0 || index==st->nb_index_entries-1){
1644 if(st->nb_index_entries){
1645 assert(st->index_entries);
1646 ie= &st->index_entries[st->nb_index_entries-1];
1647 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1649 av_update_cur_dts(s, st, ie->timestamp);
1651 if ((ret = url_fseek(s->pb, s->data_offset, SEEK_SET)) < 0)
1657 ret = av_read_frame(s, &pkt);
1658 }while(ret == AVERROR(EAGAIN));
1661 av_free_packet(&pkt);
1662 if(stream_index == pkt.stream_index){
1663 if((pkt.flags & AV_PKT_FLAG_KEY) && pkt.dts > timestamp)
1667 index = av_index_search_timestamp(st, timestamp, flags);
1672 ff_read_frame_flush(s);
/* prefer the demuxer's own read_seek when it has one */
1673 if (s->iformat->read_seek){
1674 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
1677 ie = &st->index_entries[index];
1678 if ((ret = url_fseek(s->pb, ie->pos, SEEK_SET)) < 0)
1680 av_update_cur_dts(s, st, ie->timestamp);
/**
 * Public seek entry point. Dispatch order: byte seek if requested, the
 * demuxer's read_seek, then binary search via read_timestamp, and
 * finally the generic index-based seek.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1685 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
1690 ff_read_frame_flush(s);
1692 if(flags & AVSEEK_FLAG_BYTE)
1693 return av_seek_frame_byte(s, stream_index, timestamp, flags);
1695 if(stream_index < 0){
1696 stream_index= av_find_default_stream_index(s);
1697 if(stream_index < 0)
1700 st= s->streams[stream_index];
1701 /* timestamp for default must be expressed in AV_TIME_BASE units */
1702 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
1705 /* first, we try the format specific seek */
1706 if (s->iformat->read_seek)
1707 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
1714 if(s->iformat->read_timestamp)
1715 return av_seek_frame_binary(s, stream_index, timestamp, flags);
1717 return av_seek_frame_generic(s, stream_index, timestamp, flags);
/**
 * New-style seek API: seek to ts constrained to [min_ts, max_ts].
 * Uses the demuxer's read_seek2 when available; otherwise falls back to
 * av_seek_frame(), picking AVSEEK_FLAG_BACKWARD when ts lies closer to
 * max_ts than to min_ts.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1720 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
1722 if(min_ts > ts || max_ts < ts)
1725 ff_read_frame_flush(s);
1727 if (s->iformat->read_seek2)
1728 return s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
1730 if(s->iformat->read_timestamp){
1731 //try to seek via read_timestamp()
1734 //Fall back to the old API if the new one is not implemented but the old one is
1735 //Note: the old API has somewhat different semantics
1736 if(s->iformat->read_seek || 1)
1737 return av_seek_frame(s, stream_index, ts, flags | (ts - min_ts > (uint64_t)(max_ts - ts) ? AVSEEK_FLAG_BACKWARD : 0));
1739 // try some generic seek like av_seek_frame_generic() but with new ts semantics
1742 /*******************************************************/
1745 * Return TRUE if any stream in the file has an accurate (known) duration.
1747 * @return TRUE if at least one stream has its duration set.
1749 static int av_has_duration(AVFormatContext *ic)
1754 for(i = 0;i < ic->nb_streams; i++) {
1755 st = ic->streams[i];
1756 if (st->duration != AV_NOPTS_VALUE)
1763 * Estimate the global stream timings from those of each component stream.
1765 * Also computes the global bitrate if possible.
1767 static void av_update_stream_timings(AVFormatContext *ic)
1769 int64_t start_time, start_time1, end_time, end_time1;
1770 int64_t duration, duration1;
1774 start_time = INT64_MAX;
1775 end_time = INT64_MIN;
1776 duration = INT64_MIN;
/* fold each stream's start/end/duration (rescaled to AV_TIME_BASE) into
 * global minima/maxima */
1777 for(i = 0;i < ic->nb_streams; i++) {
1778 st = ic->streams[i];
1779 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
1780 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
1781 if (start_time1 < start_time)
1782 start_time = start_time1;
1783 if (st->duration != AV_NOPTS_VALUE) {
1784 end_time1 = start_time1
1785 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1786 if (end_time1 > end_time)
1787 end_time = end_time1;
1790 if (st->duration != AV_NOPTS_VALUE) {
1791 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
1792 if (duration1 > duration)
1793 duration = duration1;
1796 if (start_time != INT64_MAX) {
1797 ic->start_time = start_time;
1798 if (end_time != INT64_MIN) {
/* the overall span may exceed the longest single stream duration */
1799 if (end_time - start_time > duration)
1800 duration = end_time - start_time;
1803 if (duration != INT64_MIN) {
1804 ic->duration = duration;
1805 if (ic->file_size > 0) {
1806 /* compute the bitrate */
1807 ic->bit_rate = (double)ic->file_size * 8.0 * AV_TIME_BASE /
1808 (double)ic->duration;
/**
 * Propagate the container-level start_time/duration (computed by
 * av_update_stream_timings()) down to any stream that has no start time
 * of its own, rescaling into each stream's time base.
 */
1813 static void fill_all_stream_timings(AVFormatContext *ic)
1818 av_update_stream_timings(ic);
1819 for(i = 0;i < ic->nb_streams; i++) {
1820 st = ic->streams[i];
1821 if (st->start_time == AV_NOPTS_VALUE) {
1822 if(ic->start_time != AV_NOPTS_VALUE)
1823 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
1824 if(ic->duration != AV_NOPTS_VALUE)
1825 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/**
 * Last-resort duration estimate: sum the per-stream bit rates if the
 * container has none, then derive each stream's duration from
 * file size * 8 / bit_rate.
 */
1830 static void av_estimate_timings_from_bit_rate(AVFormatContext *ic)
1832 int64_t filesize, duration;
1836 /* if bit_rate is already set, we believe it */
1837 if (ic->bit_rate == 0) {
1839 for(i=0;i<ic->nb_streams;i++) {
1840 st = ic->streams[i];
1841 bit_rate += st->codec->bit_rate;
1843 ic->bit_rate = bit_rate;
1846 /* if duration is already set, we believe it */
1847 if (ic->duration == AV_NOPTS_VALUE &&
1848 ic->bit_rate != 0 &&
1849 ic->file_size != 0) {
1850 filesize = ic->file_size;
1852 for(i = 0; i < ic->nb_streams; i++) {
1853 st = ic->streams[i];
/* duration = (filesize*8 / bit_rate) expressed in the stream time base */
1854 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
1855 if (st->duration == AV_NOPTS_VALUE)
1856 st->duration = duration;
/* how many bytes to probe near EOF per attempt, and how often to retry
 * with a doubled window */
1862 #define DURATION_MAX_READ_SIZE 250000
1863 #define DURATION_MAX_RETRY 3
1865 /* only usable for MPEG-PS streams */
/**
 * Estimate stream durations by reading packets near the end of the file
 * and taking end_pts - start_pts per stream; handles PTS wrap via
 * pts_wrap_bits. Restores the read position to old_offset afterwards.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1866 static void av_estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
1868 AVPacket pkt1, *pkt = &pkt1;
1870 int read_size, i, ret;
1871 int64_t end_time, start_time[MAX_STREAMS];
1872 int64_t filesize, offset, duration;
1877 /* flush packet queue */
1878 flush_packet_queue(ic);
/* record each stream's start time (or first dts) as the reference */
1880 for(i=0;i<ic->nb_streams;i++) {
1881 st = ic->streams[i];
1882 if(st->start_time != AV_NOPTS_VALUE){
1883 start_time[i]= st->start_time;
1884 }else if(st->first_dts != AV_NOPTS_VALUE){
1885 start_time[i]= st->first_dts;
1887 av_log(st->codec, AV_LOG_WARNING, "start time is not set in av_estimate_timings_from_pts\n");
1890 av_parser_close(st->parser);
1892 av_free_packet(&st->cur_pkt);
1896 /* estimate the end time (duration) */
1897 /* XXX: may need to support wrapping */
1898 filesize = ic->file_size;
1899 end_time = AV_NOPTS_VALUE;
/* probe a window near EOF; on failure retry with a window twice as big */
1901 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
1905 url_fseek(ic->pb, offset, SEEK_SET);
1908 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
1912 ret = av_read_packet(ic, pkt);
1913 }while(ret == AVERROR(EAGAIN));
1916 read_size += pkt->size;
1917 st = ic->streams[pkt->stream_index];
1918 if (pkt->pts != AV_NOPTS_VALUE &&
1919 start_time[pkt->stream_index] != AV_NOPTS_VALUE) {
1920 end_time = pkt->pts;
1921 duration = end_time - start_time[pkt->stream_index];
/* compensate a negative span caused by PTS wraparound */
1923 duration += 1LL<<st->pts_wrap_bits;
1925 if (st->duration == AV_NOPTS_VALUE ||
1926 st->duration < duration)
1927 st->duration = duration;
1930 av_free_packet(pkt);
1932 }while( end_time==AV_NOPTS_VALUE
1933 && filesize > (DURATION_MAX_READ_SIZE<<retry)
1934 && ++retry <= DURATION_MAX_RETRY);
1936 fill_all_stream_timings(ic);
1938 url_fseek(ic->pb, old_offset, SEEK_SET);
1939 for(i=0; i<ic->nb_streams; i++){
1941 st->cur_dts= st->first_dts;
1942 st->last_IP_pts = AV_NOPTS_VALUE;
/**
 * Top-level timing estimation: choose the best available strategy —
 * PTS probing for seekable MPEG-PS/TS, per-stream durations when any
 * stream has one, bit-rate extrapolation otherwise.
 * NOTE(review): interior lines are missing from this excerpt; the printf
 * section at the end appears to be debug-only output.
 */
1946 static void av_estimate_timings(AVFormatContext *ic, int64_t old_offset)
1950 /* get the file size, if possible */
1951 if (ic->iformat->flags & AVFMT_NOFILE) {
1954 file_size = url_fsize(ic->pb);
1958 ic->file_size = file_size;
1960 if ((!strcmp(ic->iformat->name, "mpeg") ||
1961 !strcmp(ic->iformat->name, "mpegts")) &&
1962 file_size && !url_is_streamed(ic->pb)) {
1963 /* get accurate estimate from the PTSes */
1964 av_estimate_timings_from_pts(ic, old_offset);
1965 } else if (av_has_duration(ic)) {
1966 /* at least one component has timings - we use them for all
1968 fill_all_stream_timings(ic);
1970 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
1971 /* less precise: use bitrate info */
1972 av_estimate_timings_from_bit_rate(ic);
1974 av_update_stream_timings(ic);
1980 for(i = 0;i < ic->nb_streams; i++) {
1981 st = ic->streams[i];
1982 printf("%d: start_time: %0.3f duration: %0.3f\n",
1983 i, (double)st->start_time / AV_TIME_BASE,
1984 (double)st->duration / AV_TIME_BASE);
1986 printf("stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
1987 (double)ic->start_time / AV_TIME_BASE,
1988 (double)ic->duration / AV_TIME_BASE,
1989 ic->bit_rate / 1000);
/**
 * Return nonzero when enough codec parameters are known to use the
 * stream: audio needs sample_rate/channels/sample_fmt (and a frame size
 * for the listed frame-based codecs), video needs width and pix_fmt,
 * and the codec id must be set.
 * NOTE(review): interior lines are missing from this excerpt.
 */
1994 static int has_codec_parameters(AVCodecContext *enc)
1997 switch(enc->codec_type) {
1998 case AVMEDIA_TYPE_AUDIO:
1999 val = enc->sample_rate && enc->channels && enc->sample_fmt != SAMPLE_FMT_NONE;
/* these codecs require a known frame_size as well */
2000 if(!enc->frame_size &&
2001 (enc->codec_id == CODEC_ID_VORBIS ||
2002 enc->codec_id == CODEC_ID_AAC ||
2003 enc->codec_id == CODEC_ID_MP1 ||
2004 enc->codec_id == CODEC_ID_MP2 ||
2005 enc->codec_id == CODEC_ID_MP3 ||
2006 enc->codec_id == CODEC_ID_SPEEX))
2009 case AVMEDIA_TYPE_VIDEO:
2010 val = enc->width && enc->pix_fmt != PIX_FMT_NONE;
2016 return enc->codec_id != CODEC_ID_NONE && val != 0;
2019 static int has_decode_delay_been_guessed(AVStream *st)
2021 return st->codec->codec_id != CODEC_ID_H264 ||
2022 st->codec_info_nb_frames >= 4 + st->codec->has_b_frames;
/**
 * Open the stream's decoder if needed and decode one packet, so missing
 * codec parameters (dimensions, channel layout, ...) get filled in from
 * actual bitstream data.
 * NOTE(review): interior lines (error paths, frees, return) are missing
 * from this excerpt.
 */
2025 static int try_decode_frame(AVStream *st, AVPacket *avpkt)
2029 int got_picture, data_size, ret=0;
2032 if(!st->codec->codec){
2033 codec = avcodec_find_decoder(st->codec->codec_id);
2036 ret = avcodec_open(st->codec, codec);
/* only decode while parameters or the reorder delay are still unknown */
2041 if(!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st)){
2042 switch(st->codec->codec_type) {
2043 case AVMEDIA_TYPE_VIDEO:
2044 avcodec_get_frame_defaults(&picture);
2045 ret = avcodec_decode_video2(st->codec, &picture,
2046 &got_picture, avpkt);
2048 case AVMEDIA_TYPE_AUDIO:
2049 data_size = FFMAX(avpkt->size, AVCODEC_MAX_AUDIO_FRAME_SIZE);
2050 samples = av_malloc(data_size);
2053 ret = avcodec_decode_audio3(st->codec, samples,
/**
 * Look up the container tag for a codec id in a CODEC_ID_NONE-terminated
 * tag table.
 * NOTE(review): the loop body and return are missing from this excerpt.
 */
2065 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum CodecID id)
2067 while (tags->id != CODEC_ID_NONE) {
/**
 * Reverse lookup: find the codec id for a container tag in a
 * CODEC_ID_NONE-terminated table. A first pass matches the tag exactly;
 * a second pass matches case-insensitively via ff_toupper4().
 */
2075 enum CodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2078 for(i=0; tags[i].id != CODEC_ID_NONE;i++) {
2079 if(tag == tags[i].tag)
2082 for(i=0; tags[i].id != CODEC_ID_NONE; i++) {
2083 if (ff_toupper4(tag) == ff_toupper4(tags[i].tag))
2086 return CODEC_ID_NONE;
/**
 * Look up a codec tag across a NULL-terminated list of tag tables,
 * delegating each table to ff_codec_get_tag().
 * NOTE(review): the success/fall-through returns are missing from this
 * excerpt.
 */
2089 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum CodecID id)
2092 for(i=0; tags && tags[i]; i++){
2093 int tag= ff_codec_get_tag(tags[i], id);
/**
 * Look up a codec id for a tag across a NULL-terminated list of tag
 * tables, returning the first non-CODEC_ID_NONE hit.
 */
2099 enum CodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2102 for(i=0; tags && tags[i]; i++){
2103 enum CodecID id= ff_codec_get_id(tags[i], tag);
2104 if(id!=CODEC_ID_NONE) return id;
2106 return CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter ends where
 * the next one starts; the final one ends at start_time + duration,
 * rescaled into the chapter's time base.
 */
2109 static void compute_chapters_end(AVFormatContext *s)
2113 for (i=0; i+1<s->nb_chapters; i++)
2114 if (s->chapters[i]->end == AV_NOPTS_VALUE) {
/* relies on chapters being sorted and sharing a time base */
2115 assert(s->chapters[i]->start <= s->chapters[i+1]->start);
2116 assert(!av_cmp_q(s->chapters[i]->time_base, s->chapters[i+1]->time_base));
2117 s->chapters[i]->end = s->chapters[i+1]->start;
2120 if (s->nb_chapters && s->chapters[i]->end == AV_NOPTS_VALUE) {
2121 assert(s->start_time != AV_NOPTS_VALUE);
2122 assert(s->duration > 0);
2123 s->chapters[i]->end = av_rescale_q(s->start_time + s->duration,
2125 s->chapters[i]->time_base);
#define MAX_STD_TIMEBASES (60*12+5)
/**
 * Map index i to a candidate frame-rate sample point used by the
 * frame-rate guessing code: the first 60*12 entries are consecutive
 * multiples of 1001, the last 5 entries are the exact rates
 * 24/30/60/12/15 scaled by 1000*12.
 */
static int get_std_framerate(int i)
{
    static const int exact_rates[] = { 24, 30, 60, 12, 15 };

    if (i < 60 * 12)
        return i * 1001;
    return exact_rates[i - 60 * 12] * 1000 * 12;
}
2136 * Is the time base unreliable.
2137 * This is a heuristic to balance between quick acceptance of the values in
2138 * the headers vs. some extra checks.
2139 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2140 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2141 * And there are "variable" fps files this needs to detect as well.
2143 static int tb_unreliable(AVCodecContext *c){
2144 if( c->time_base.den >= 101L*c->time_base.num
2145 || c->time_base.den < 5L*c->time_base.num
2146 /* || c->codec_tag == AV_RL32("DIVX")
2147 || c->codec_tag == AV_RL32("XVID")*/
2148 || c->codec_id == CODEC_ID_MPEG2VIDEO
2149 || c->codec_id == CODEC_ID_H264
/**
 * Probe the input by reading and (when necessary) decoding packets until
 * every stream's codec parameters and a frame-rate estimate are known,
 * or until probesize/max_analyze_duration limits are hit. Read packets
 * are buffered so they can be returned to the caller later.
 * NOTE(review): a substantial number of interior lines are missing from
 * this excerpt.
 */
2155 int av_find_stream_info(AVFormatContext *ic)
2157 int i, count, ret, read_size, j;
2159 AVPacket pkt1, *pkt;
2160 int64_t old_offset = url_ftell(ic->pb);
/* per-stream scratch data used for duration/frame-rate estimation */
2163 int64_t duration_gcd;
2165 double duration_error[MAX_STD_TIMEBASES];
2166 int64_t codec_info_duration;
2167 } info[MAX_STREAMS] = {{0}};
2169 for(i=0;i<ic->nb_streams;i++) {
2171 st = ic->streams[i];
/* force AAC parameters to be re-derived from the bitstream */
2172 if (st->codec->codec_id == CODEC_ID_AAC) {
2173 st->codec->sample_rate = 0;
2174 st->codec->frame_size = 0;
2175 st->codec->channels = 0;
2177 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
2178 /* if(!st->time_base.num)
2180 if(!st->codec->time_base.num)
2181 st->codec->time_base= st->time_base;
2183 //only for the split stuff
2184 if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2185 st->parser = av_parser_init(st->codec->codec_id);
2186 if(st->need_parsing == AVSTREAM_PARSE_HEADERS && st->parser){
2187 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2190 assert(!st->codec->codec);
2191 codec = avcodec_find_decoder(st->codec->codec_id);
2193 /* Force decoding of at least one frame of codec data
2194 * this makes sure the codec initializes the channel configuration
2195 * and does not trust the values from the container.
2197 if (codec && codec->capabilities & CODEC_CAP_CHANNEL_CONF)
2198 st->codec->channels = 0;
2200 //try to just open decoders, in case this is enough to get parameters
2201 if(!has_codec_parameters(st->codec)){
2203 avcodec_open(st->codec, codec);
2207 for(i=0;i<MAX_STREAMS;i++){
2208 info[i].last_dts= AV_NOPTS_VALUE;
/* main probing loop: read packets until all info is found or limits hit */
2214 if(url_interrupt_cb()){
2215 ret= AVERROR(EINTR);
2216 av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2220 /* check if one codec still needs to be handled */
2221 for(i=0;i<ic->nb_streams;i++) {
2222 st = ic->streams[i];
2223 if (!has_codec_parameters(st->codec))
2225 /* variable fps and no guess at the real fps */
2226 if( tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2227 && info[i].duration_count<20 && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2229 if(st->parser && st->parser->parser->split && !st->codec->extradata)
2231 if(st->first_dts == AV_NOPTS_VALUE)
2234 if (i == ic->nb_streams) {
2235 /* NOTE: if the format has no header, then we need to read
2236 some packets to get most of the streams, so we cannot
2238 if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2239 /* if we found the info for all the codecs, we can stop */
2241 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2245 /* we did not get all the codec info, but we read too much data */
2246 if (read_size >= ic->probesize) {
2248 av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit %d reached\n", ic->probesize);
2252 /* NOTE: a new stream can be added there if no header in file
2253 (AVFMTCTX_NOHEADER) */
2254 ret = av_read_frame_internal(ic, &pkt1);
2255 if(ret == AVERROR(EAGAIN))
2259 ret = -1; /* we could not have all the codec parameters before EOF */
2260 for(i=0;i<ic->nb_streams;i++) {
2261 st = ic->streams[i];
2262 if (!has_codec_parameters(st->codec)){
2264 avcodec_string(buf, sizeof(buf), st->codec, 0);
2265 av_log(ic, AV_LOG_WARNING, "Could not find codec parameters (%s)\n", buf);
/* keep the probed packet so av_read_frame() can return it later */
2273 pkt= add_to_pktbuf(&ic->packet_buffer, &pkt1, &ic->packet_buffer_end);
2274 if(av_dup_packet(pkt) < 0) {
2275 return AVERROR(ENOMEM);
2278 read_size += pkt->size;
2280 st = ic->streams[pkt->stream_index];
2281 if(st->codec_info_nb_frames>1) {
2282 if (st->time_base.den > 0 && av_rescale_q(info[st->index].codec_info_duration, st->time_base, AV_TIME_BASE_Q) >= ic->max_analyze_duration){
2283 av_log(ic, AV_LOG_WARNING, "max_analyze_duration reached\n");
2286 info[st->index].codec_info_duration += pkt->duration;
/* accumulate dts deltas to estimate the real frame rate */
2289 int index= pkt->stream_index;
2290 int64_t last= info[index].last_dts;
2291 int64_t duration= pkt->dts - last;
2293 if(pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && duration>0){
2294 double dur= duration * av_q2d(st->time_base);
2296 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2297 // av_log(NULL, AV_LOG_ERROR, "%f\n", dur);
2298 if(info[index].duration_count < 2)
2299 memset(info[index].duration_error, 0, sizeof(info[index].duration_error));
/* score each standard frame rate by its accumulated squared error */
2300 for(i=1; i<MAX_STD_TIMEBASES; i++){
2301 int framerate= get_std_framerate(i);
2302 int ticks= lrintf(dur*framerate/(1001*12));
2303 double error= dur - ticks*1001*12/(double)framerate;
2304 info[index].duration_error[i] += error*error;
2306 info[index].duration_count++;
2307 // ignore the first 4 values, they might have some random jitter
2308 if (info[index].duration_count > 3)
2309 info[index].duration_gcd = av_gcd(info[index].duration_gcd, duration);
2311 if(last == AV_NOPTS_VALUE || info[index].duration_count <= 1)
2312 info[pkt->stream_index].last_dts = pkt->dts;
/* extract extradata (e.g. SPS/PPS style headers) via the parser's split */
2314 if(st->parser && st->parser->parser->split && !st->codec->extradata){
2315 int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2317 st->codec->extradata_size= i;
2318 st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
2319 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2320 memset(st->codec->extradata + i, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2324 /* if still no information, we try to open the codec and to
2325 decompress the frame. We try to avoid that in most cases as
2326 it takes longer and uses more memory. For MPEG-4, we need to
2327 decompress for QuickTime. */
2328 if (!has_codec_parameters(st->codec) || !has_decode_delay_been_guessed(st))
2329 try_decode_frame(st, pkt);
2331 st->codec_info_nb_frames++;
2335 // close codecs which were opened in try_decode_frame()
2336 for(i=0;i<ic->nb_streams;i++) {
2337 st = ic->streams[i];
2338 if(st->codec->codec)
2339 avcodec_close(st->codec);
/* derive avg_frame_rate / r_frame_rate from the gathered statistics */
2341 for(i=0;i<ic->nb_streams;i++) {
2342 st = ic->streams[i];
2343 if(st->codec_info_nb_frames>2 && !st->avg_frame_rate.num && info[i].codec_info_duration)
2344 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
2345 (st->codec_info_nb_frames-2)*(int64_t)st->time_base.den,
2346 info[i].codec_info_duration*(int64_t)st->time_base.num, 60000);
2347 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2348 if(st->codec->codec_id == CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample)
2349 st->codec->codec_tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
2351 // the check for tb_unreliable() is not completely correct, since this is not about handling
2352 // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
2353 // ipmovie.c produces.
2354 if (tb_unreliable(st->codec) && info[i].duration_count > 15 && info[i].duration_gcd > 1 && !st->r_frame_rate.num)
2355 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * info[i].duration_gcd, INT_MAX);
2356 if(info[i].duration_count && !st->r_frame_rate.num
2357 && tb_unreliable(st->codec) /*&&
2358 //FIXME we should not special-case MPEG-2, but this needs testing with non-MPEG-2 ...
2359 st->time_base.num*duration_sum[i]/info[i].duration_count*101LL > st->time_base.den*/){
2361 double best_error= 2*av_q2d(st->time_base);
2362 best_error= best_error*best_error*info[i].duration_count*1000*12*30;
/* pick the standard frame rate with the smallest accumulated error */
2364 for(j=1; j<MAX_STD_TIMEBASES; j++){
2365 double error= info[i].duration_error[j] * get_std_framerate(j);
2366 // if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2367 // av_log(NULL, AV_LOG_ERROR, "%f %f\n", get_std_framerate(j) / 12.0/1001, error);
2368 if(error < best_error){
2370 num = get_std_framerate(j);
2373 // do not increase frame rate by more than 1 % in order to match a standard rate.
2374 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
2375 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
2378 if (!st->r_frame_rate.num){
/* fall back to codec or stream time base, whichever is coarser */
2379 if( st->codec->time_base.den * (int64_t)st->time_base.num
2380 <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
2381 st->r_frame_rate.num = st->codec->time_base.den;
2382 st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
2384 st->r_frame_rate.num = st->time_base.den;
2385 st->r_frame_rate.den = st->time_base.num;
2388 }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
2389 if(!st->codec->bits_per_coded_sample)
2390 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
2394 av_estimate_timings(ic, old_offset);
2396 compute_chapters_end(ic);
2399 /* correct DTS for B-frame streams with no timestamps */
2400 for(i=0;i<ic->nb_streams;i++) {
2401 st = ic->streams[i];
2402 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
2404 ppktl = &ic->packet_buffer;
2406 if(ppkt1->stream_index != i)
2408 if(ppkt1->pkt->dts < 0)
2410 if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
2412 ppkt1->pkt->dts -= delta;
2417 st->cur_dts -= delta;
2426 /*******************************************************/
/**
 * Start/resume playback: use the demuxer's read_play callback if it has
 * one, otherwise unpause the underlying protocol; ENOSYS when neither
 * path exists.
 */
2428 int av_read_play(AVFormatContext *s)
2430 if (s->iformat->read_play)
2431 return s->iformat->read_play(s);
2433 return av_url_read_fpause(s->pb, 0);
2434 return AVERROR(ENOSYS);
/**
 * Pause playback: use the demuxer's read_pause callback if it has one,
 * otherwise pause the underlying protocol; ENOSYS when neither path
 * exists.
 */
2437 int av_read_pause(AVFormatContext *s)
2439 if (s->iformat->read_pause)
2440 return s->iformat->read_pause(s);
2442 return av_url_read_fpause(s->pb, 1);
2443 return AVERROR(ENOSYS);
/**
 * Free an AVFormatContext and everything it owns (streams, programs,
 * chapters, metadata, queued packets, private data) without closing the
 * underlying ByteIOContext.
 * NOTE(review): some interior lines are missing from this excerpt.
 */
2446 void av_close_input_stream(AVFormatContext *s)
2451 if (s->iformat->read_close)
2452 s->iformat->read_close(s);
2453 for(i=0;i<s->nb_streams;i++) {
2454 /* free all data in a stream component */
2457 av_parser_close(st->parser);
2458 av_free_packet(&st->cur_pkt);
2460 av_metadata_free(&st->metadata);
2461 av_free(st->index_entries);
2462 av_free(st->codec->extradata);
2464 #if FF_API_OLD_METADATA
2465 av_free(st->filename);
2467 av_free(st->priv_data);
2470 for(i=s->nb_programs-1; i>=0; i--) {
2471 #if FF_API_OLD_METADATA
2472 av_freep(&s->programs[i]->provider_name);
2473 av_freep(&s->programs[i]->name);
2475 av_metadata_free(&s->programs[i]->metadata);
2476 av_freep(&s->programs[i]->stream_index);
2477 av_freep(&s->programs[i]);
2479 av_freep(&s->programs);
2480 flush_packet_queue(s);
2481 av_freep(&s->priv_data);
2482 while(s->nb_chapters--) {
2483 #if FF_API_OLD_METADATA
2484 av_free(s->chapters[s->nb_chapters]->title);
2486 av_metadata_free(&s->chapters[s->nb_chapters]->metadata);
2487 av_free(s->chapters[s->nb_chapters]);
2489 av_freep(&s->chapters);
2490 av_metadata_free(&s->metadata);
/**
 * Close an input file: free the context via av_close_input_stream() and
 * then close the ByteIOContext (unless the format is AVFMT_NOFILE).
 * NOTE(review): the closing call on pb is missing from this excerpt.
 */
2495 void av_close_input_file(AVFormatContext *s)
2497 ByteIOContext *pb = s->iformat->flags & AVFMT_NOFILE ? NULL : s->pb;
2498 av_close_input_stream(s);
/**
 * Allocate a new AVStream, initialize its timing fields to sane
 * defaults (MPEG-like 90 kHz time base, unset pts/dts) and append it to
 * the context's stream array.
 *
 * @param s media file handle
 * @param id format-specific stream id
 * @return the new stream, or NULL on error (per the visible NULL checks)
 */
2503 AVStream *av_new_stream(AVFormatContext *s, int id)
2508 if (s->nb_streams >= MAX_STREAMS){
2509 av_log(s, AV_LOG_ERROR, "Too many streams\n");
2513 st = av_mallocz(sizeof(AVStream));
2517 st->codec= avcodec_alloc_context();
2519 /* no default bitrate if decoding */
2520 st->codec->bit_rate = 0;
2522 st->index = s->nb_streams;
2524 st->start_time = AV_NOPTS_VALUE;
2525 st->duration = AV_NOPTS_VALUE;
2526 /* we set the current DTS to 0 so that formats without any timestamps
2527 but durations get some timestamps, formats with some unknown
2528 timestamps have their first few packets buffered and the
2529 timestamps corrected before they are returned to the user */
2531 st->first_dts = AV_NOPTS_VALUE;
2532 st->probe_packets = MAX_PROBE_PACKETS;
2534 /* default pts setting is MPEG-like */
2535 av_set_pts_info(st, 33, 1, 90000);
2536 st->last_IP_pts = AV_NOPTS_VALUE;
2537 for(i=0; i<MAX_REORDER_DELAY+1; i++)
2538 st->pts_buffer[i]= AV_NOPTS_VALUE;
2539 st->reference_dts = AV_NOPTS_VALUE;
2541 st->sample_aspect_ratio = (AVRational){0,1};
2543 s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new
 * one if none exists yet.
 */
2547 AVProgram *av_new_program(AVFormatContext *ac, int id)
2549 AVProgram *program=NULL;
2553 av_log(ac, AV_LOG_DEBUG, "new_program: id=0x%04x\n", id);
/* reuse an existing program with the same id if present */
2556 for(i=0; i<ac->nb_programs; i++)
2557 if(ac->programs[i]->id == id)
2558 program = ac->programs[i];
2561 program = av_mallocz(sizeof(AVProgram));
2564 dynarray_add(&ac->programs, &ac->nb_programs, program);
2565 program->discard = AVDISCARD_NONE;
/**
 * Return the chapter with the given id, creating and registering a new
 * one if needed, then (re)set its title metadata, time base and start.
 * NOTE(review): the 'end' assignment is missing from this excerpt.
 */
2572 AVChapter *ff_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
2574 AVChapter *chapter = NULL;
2577 for(i=0; i<s->nb_chapters; i++)
2578 if(s->chapters[i]->id == id)
2579 chapter = s->chapters[i];
2582 chapter= av_mallocz(sizeof(AVChapter));
2585 dynarray_add(&s->chapters, &s->nb_chapters, chapter);
2587 #if FF_API_OLD_METADATA
2588 av_free(chapter->title);
2590 av_metadata_set2(&chapter->metadata, "title", title, 0);
2592 chapter->time_base= time_base;
2593 chapter->start = start;
2599 /************************************************************/
2600 /* output media file */
/**
 * Allocate the muxer's private data and forward the format parameters
 * to its set_parameters callback, when present.
 * NOTE(review): some interior lines (NULL check, returns) are missing
 * from this excerpt.
 */
2602 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap)
2606 if (s->oformat->priv_data_size > 0) {
2607 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2609 return AVERROR(ENOMEM);
2611 s->priv_data = NULL;
2613 if (s->oformat->set_parameters) {
2614 ret = s->oformat->set_parameters(s, ap);
/**
 * Check whether the stream's (codec_tag, codec_id) pair is consistent
 * with the output format's tag tables.
 * NOTE(review): some interior lines (tag capture, return statements)
 * are missing from this excerpt.
 */
2621 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
2623 const AVCodecTag *avctag;
2625 enum CodecID id = CODEC_ID_NONE;
2626 unsigned int tag = 0;
2629 * Check that tag + id is in the table
2630 * If neither is in the table -> OK
2631 * If tag is in the table with another id -> FAIL
2632 * If id is in the table with another tag -> FAIL unless strict < normal
2634 for (n = 0; s->oformat->codec_tag[n]; n++) {
2635 avctag = s->oformat->codec_tag[n];
2636 while (avctag->id != CODEC_ID_NONE) {
2637 if (ff_toupper4(avctag->tag) == ff_toupper4(st->codec->codec_tag)) {
2639 if (id == st->codec->codec_id)
2642 if (avctag->id == st->codec->codec_id)
2647 if (id != CODEC_ID_NONE)
2649 if (tag && (st->codec->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
/**
 * Validate all streams, fix up codec tags, write the container header via
 * the muxer's write_header() callback, and initialize per-stream fractional
 * PTS generation. Returns 0 on success or a negative AVERROR code.
 * NOTE(review): many interior lines (declarations, break statements,
 * closing braces) are missing from this extraction.
 */
2654 int av_write_header(AVFormatContext *s)
2659 // some sanity checks
2660 if (s->nb_streams == 0) {
2661 av_log(s, AV_LOG_ERROR, "no streams\n");
2662 return AVERROR(EINVAL);
/* per-stream parameter validation */
2665 for(i=0;i<s->nb_streams;i++) {
2668 switch (st->codec->codec_type) {
2669 case AVMEDIA_TYPE_AUDIO:
2670 if(st->codec->sample_rate<=0){
2671 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
2672 return AVERROR(EINVAL);
/* derive block_align from channels * bits-per-sample when unset */
2674 if(!st->codec->block_align)
2675 st->codec->block_align = st->codec->channels *
2676 av_get_bits_per_sample(st->codec->codec_id) >> 3;
2678 case AVMEDIA_TYPE_VIDEO:
2679 if(st->codec->time_base.num<=0 || st->codec->time_base.den<=0){ //FIXME audio too?
2680 av_log(s, AV_LOG_ERROR, "time base not set\n");
2681 return AVERROR(EINVAL);
2683 if((st->codec->width<=0 || st->codec->height<=0) && !(s->oformat->flags & AVFMT_NODIMENSIONS)){
2684 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
2685 return AVERROR(EINVAL);
2687 if(av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)){
2688 av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between encoder and muxer layer\n");
2689 return AVERROR(EINVAL);
/* reconcile the stream's codec_tag with the muxer's tag tables */
2694 if(s->oformat->codec_tag){
2695 if(st->codec->codec_tag && st->codec->codec_id == CODEC_ID_RAWVIDEO && av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id) == 0 && !validate_codec_tag(s, st)){
2696 //the current rawvideo encoding system ends up setting the wrong codec_tag for avi, we override it here
2697 st->codec->codec_tag= 0;
2699 if(st->codec->codec_tag){
2700 if (!validate_codec_tag(s, st)) {
2702 av_get_codec_tag_string(tagbuf, sizeof(tagbuf), st->codec->codec_tag);
2703 av_log(s, AV_LOG_ERROR,
2704 "Tag %s/0x%08x incompatible with output codec id '%d'\n",
2705 tagbuf, st->codec->codec_tag, st->codec->codec_id);
2706 return AVERROR_INVALIDDATA;
/* no tag set: pick the default one for this codec id */
2709 st->codec->codec_tag= av_codec_get_tag(s->oformat->codec_tag, st->codec->codec_id);
2712 if(s->oformat->flags & AVFMT_GLOBALHEADER &&
2713 !(st->codec->flags & CODEC_FLAG_GLOBAL_HEADER))
2714 av_log(s, AV_LOG_WARNING, "Codec for stream %d does not use global headers but container format requires global headers\n", i);
/* allocate private data here if av_set_parameters() was never called */
2717 if (!s->priv_data && s->oformat->priv_data_size > 0) {
2718 s->priv_data = av_mallocz(s->oformat->priv_data_size);
2720 return AVERROR(ENOMEM);
2723 #if FF_API_OLD_METADATA
2724 ff_metadata_mux_compat(s);
2727 /* set muxer identification string */
2728 if (!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT)) {
2732 if (!(m = av_mallocz(sizeof(AVMetadata))))
2733 return AVERROR(ENOMEM);
2734 av_metadata_set2(&m, "encoder", LIBAVFORMAT_IDENT, 0);
2735 metadata_conv(&m, s->oformat->metadata_conv, NULL);
2736 if ((t = av_metadata_get(m, "", NULL, AV_METADATA_IGNORE_SUFFIX)))
2737 av_metadata_set2(&s->metadata, t->key, t->value, 0);
2738 av_metadata_free(&m);
2741 if(s->oformat->write_header){
2742 ret = s->oformat->write_header(s);
2747 /* init PTS generation */
2748 for(i=0;i<s->nb_streams;i++) {
2749 int64_t den = AV_NOPTS_VALUE;
2752 switch (st->codec->codec_type) {
2753 case AVMEDIA_TYPE_AUDIO:
/* audio ticks at sample_rate in the stream time base */
2754 den = (int64_t)st->time_base.num * st->codec->sample_rate;
2756 case AVMEDIA_TYPE_VIDEO:
2757 den = (int64_t)st->time_base.num * st->codec->time_base.den;
2762 if (den != AV_NOPTS_VALUE) {
2764 return AVERROR_INVALIDDATA;
2765 av_frac_init(&st->pts, 0, 0, den);
2771 //FIXME merge with compute_pkt_fields
/**
 * Fill in missing packet timestamp fields before muxing: duration, pts/dts
 * derivation through the reorder (B-frame) delay buffer, and monotonicity
 * checks. Updates st->cur_dts and the fractional st->pts accumulator.
 * Returns a negative value on invalid timestamps (error paths are partly
 * missing from this extraction).
 */
2772 static int compute_pkt_fields2(AVFormatContext *s, AVStream *st, AVPacket *pkt){
2773 int delay = FFMAX(st->codec->has_b_frames, !!st->codec->max_b_frames);
2774 int num, den, frame_size, i;
2776 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%"PRId64" dts:%"PRId64" cur_dts:%"PRId64" b:%d size:%d st:%d\n", pkt->pts, pkt->dts, st->cur_dts, delay, pkt->size, pkt->stream_index);
2778 /*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
2781 /* duration field */
2782 if (pkt->duration == 0) {
2783 compute_frame_duration(&num, &den, st, NULL, pkt);
/* rescale the frame duration into the stream time base */
2785 pkt->duration = av_rescale(1, num * (int64_t)st->time_base.den * st->codec->ticks_per_frame, den * (int64_t)st->time_base.num);
/* no reordering: dts doubles as pts */
2789 if(pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay==0)
2792 //XXX/FIXME this is a temporary hack until all encoders output pts
2793 if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay){
2795 //        pkt->pts= st->cur_dts;
2796 pkt->pts= st->pts.val;
2799 //calculate dts from pts
2800 if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY){
/* insert pts into the reorder buffer, then bubble-sort it so that
 * pts_buffer[0] holds the smallest (= the dts candidate) */
2801 st->pts_buffer[0]= pkt->pts;
2802 for(i=1; i<delay+1 && st->pts_buffer[i] == AV_NOPTS_VALUE; i++)
2803 st->pts_buffer[i]= pkt->pts + (i-delay-1) * pkt->duration;
2804 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
2805 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
2807 pkt->dts= st->pts_buffer[0];
/* dts must increase strictly between packets of one stream */
2810 if(st->cur_dts && st->cur_dts != AV_NOPTS_VALUE && st->cur_dts >= pkt->dts){
2811 av_log(s, AV_LOG_ERROR,
2812 "st:%d error, non monotone timestamps %"PRId64" >= %"PRId64"\n",
2813 st->index, st->cur_dts, pkt->dts);
2816 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts){
2817 av_log(s, AV_LOG_ERROR, "st:%d error, pts < dts\n", st->index);
2821 //    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%"PRId64" dts2:%"PRId64"\n", pkt->pts, pkt->dts);
2822 st->cur_dts= pkt->dts;
2823 st->pts.val= pkt->dts;
/* advance the fractional pts accumulator by this packet's duration */
2826 switch (st->codec->codec_type) {
2827 case AVMEDIA_TYPE_AUDIO:
2828 frame_size = get_audio_frame_size(st->codec, pkt->size);
2830 /* HACK/FIXME, we skip the initial 0 size packets as they are most
2831 likely equal to the encoder delay, but it would be better if we
2832 had the real timestamps from the encoder */
2833 if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
2834 av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
2837 case AVMEDIA_TYPE_VIDEO:
2838 av_frac_add(&st->pts, (int64_t)st->time_base.den * st->codec->time_base.num);
/**
 * Write a packet to the output file without interleaving: fix up its
 * timestamp fields, then pass it straight to the muxer's write_packet().
 * Timestamp errors are ignored for muxers flagged AVFMT_NOTIMESTAMPS.
 * NOTE(review): return statements between these lines are missing from
 * this extraction.
 */
2846 int av_write_frame(AVFormatContext *s, AVPacket *pkt)
2848 int ret = compute_pkt_fields2(s, s->streams[pkt->stream_index], pkt);
2850 if(ret<0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2853 ret= s->oformat->write_packet(s, pkt);
/* surface any byte-I/O error from the output context */
2855 ret= url_ferror(s->pb);
/**
 * Insert a copy of 'pkt' into the context's sorted packet buffer, using
 * 'compare' to find the insertion point. Starts the search from this
 * stream's last buffered packet (packets of one stream are already in
 * order) and maintains s->packet_buffer_end and the per-stream
 * last_in_packet_buffer pointers.
 */
2859 void ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
2860 int (*compare)(AVFormatContext *, AVPacket *, AVPacket *))
2862 AVPacketList **next_point, *this_pktl;
2864 this_pktl = av_mallocz(sizeof(AVPacketList));
2865 this_pktl->pkt= *pkt;
2866 pkt->destruct= NULL;             // do not free original but only the copy
2867 av_dup_packet(&this_pktl->pkt);  // duplicate the packet if it uses non-alloced memory
/* resume searching after this stream's previous packet when possible */
2869 if(s->streams[pkt->stream_index]->last_in_packet_buffer){
2870 next_point = &(s->streams[pkt->stream_index]->last_in_packet_buffer->next);
2872 next_point = &s->packet_buffer;
/* walk forward until 'compare' says the buffered packet sorts after pkt */
2875 if(compare(s, &s->packet_buffer_end->pkt, pkt)){
2876 while(!compare(s, &(*next_point)->pkt, pkt)){
2877 next_point= &(*next_point)->next;
/* pkt sorts after everything buffered: append at the tail */
2881 next_point = &(s->packet_buffer_end->next);
2884 assert(!*next_point);
2886 s->packet_buffer_end= this_pktl;
2889 this_pktl->next= *next_point;
2891 s->streams[pkt->stream_index]->last_in_packet_buffer=
2892 *next_point= this_pktl;
/**
 * Packet ordering predicate for dts-based interleaving: returns nonzero
 * when 'pkt' must be output before 'next', i.e. when pkt->dts rescaled
 * into next's stream time base (rounding down) is strictly smaller than
 * next->dts.
 */
2895 int ff_interleave_compare_dts(AVFormatContext *s, AVPacket *next, AVPacket *pkt)
2897 AVStream *st = s->streams[ pkt ->stream_index];
2898 AVStream *st2= s->streams[ next->stream_index];
/* a/b form the cross-multiplied time-base conversion factor */
2899 int64_t a= st2->time_base.num * (int64_t)st ->time_base.den;
2900 int64_t b= st ->time_base.num * (int64_t)st2->time_base.den;
2901 return av_rescale_rnd(pkt->dts, b, a, AV_ROUND_DOWN) < next->dts;
/**
 * Buffer 'pkt' (when non-NULL) sorted by dts, and emit the head of the
 * buffer into 'out' once every stream has at least one buffered packet,
 * or when flushing. Returns 1 when a packet was output, 0 otherwise
 * (return statements are partly missing from this extraction).
 */
2904 int av_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out, AVPacket *pkt, int flush){
2910 ff_interleave_add_packet(s, pkt, ff_interleave_compare_dts);
/* count streams that still have a packet waiting in the buffer */
2913 for(i=0; i < s->nb_streams; i++)
2914 stream_count+= !!s->streams[i]->last_in_packet_buffer;
2916 if(stream_count && (s->nb_streams == stream_count || flush)){
/* pop the head of the sorted buffer */
2917 pktl= s->packet_buffer;
2920 s->packet_buffer= pktl->next;
2921 if(!s->packet_buffer)
2922 s->packet_buffer_end= NULL;
2924 if(s->streams[out->stream_index]->last_in_packet_buffer == pktl)
2925 s->streams[out->stream_index]->last_in_packet_buffer= NULL;
/* nothing to output yet */
2929 av_init_packet(out);
2935  * Interleave an AVPacket correctly so it can be muxed.
2936  * @param out the interleaved packet will be output here
2937  * @param in the input packet
2938  * @param flush 1 if no further packets are available as input and all
2939  *              remaining packets should be output
2940  * @return 1 if a packet was output, 0 if no packet could be output,
2941  *         < 0 if an error occurred
/* Dispatch to the muxer's own interleave callback when it has one,
 * otherwise fall back to generic dts-based interleaving. */
2943 static int av_interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush){
2944 if(s->oformat->interleave_packet)
2945 return s->oformat->interleave_packet(s, out, in, flush);
2947 return av_interleave_packet_per_dts(s, out, in, flush);
/**
 * Write a packet with correct interleaving: fix up timestamp fields,
 * feed the packet to av_interleave_packet(), and write every packet the
 * interleaver releases via the muxer's write_packet(). Zero-sized audio
 * packets are dropped up front.
 * NOTE(review): the loop structure and return statements are partly
 * missing from this extraction.
 */
2950 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt){
2951 AVStream *st= s->streams[ pkt->stream_index];
2953 //FIXME/XXX/HACK drop zero sized packets
2954 if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->size==0)
2957 //av_log(NULL, AV_LOG_DEBUG, "av_interleaved_write_frame %d %"PRId64" %"PRId64"\n", pkt->size, pkt->dts, pkt->pts);
2958 if(compute_pkt_fields2(s, st, pkt) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
2961 if(pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
/* drain the interleaver one packet at a time */
2966 int ret= av_interleave_packet(s, &opkt, pkt, 0);
2967 if(ret<=0) //FIXME cleanup needed for ret<0 ?
2970 ret= s->oformat->write_packet(s, &opkt);
2972 av_free_packet(&opkt);
2977 if(url_ferror(s->pb))
2978 return url_ferror(s->pb);
/**
 * Flush all remaining interleaved packets, write the container trailer via
 * the muxer's write_trailer() callback, and free per-stream and muxer
 * private data. Must be the last muxing call on the context.
 * NOTE(review): the flush-loop structure and intermediate error checks are
 * partly missing from this extraction.
 */
2982 int av_write_trailer(AVFormatContext *s)
/* flush: ask the interleaver for buffered packets until it is empty */
2988 ret= av_interleave_packet(s, &pkt, NULL, 1);
2989 if(ret<0) //FIXME cleanup needed for ret<0 ?
2994 ret= s->oformat->write_packet(s, &pkt);
2996 av_free_packet(&pkt);
3000 if(url_ferror(s->pb))
3004 if(s->oformat->write_trailer)
3005 ret = s->oformat->write_trailer(s);
3008 ret=url_ferror(s->pb);
/* release per-stream muxer state */
3009 for(i=0;i<s->nb_streams;i++) {
3010 av_freep(&s->streams[i]->priv_data);
3011 av_freep(&s->streams[i]->index_entries);
3013 av_freep(&s->priv_data);
/**
 * Append stream index 'idx' to the program with id 'progid', growing the
 * program's stream_index array. Does nothing when idx is out of range or
 * already present in the program.
 */
3017 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3020 AVProgram *program=NULL;
3023 if (idx >= ac->nb_streams) {
3024 av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
/* find the matching program and bail out on duplicates */
3028 for(i=0; i<ac->nb_programs; i++){
3029 if(ac->programs[i]->id != progid)
3031 program = ac->programs[i];
3032 for(j=0; j<program->nb_stream_indexes; j++)
3033 if(program->stream_index[j] == idx)
/* grow via a temporary so the old array survives realloc failure */
3036 tmp = av_realloc(program->stream_index, sizeof(unsigned int)*(program->nb_stream_indexes+1));
3039 program->stream_index = tmp;
3040 program->stream_index[program->nb_stream_indexes++] = idx;
/* Log a frame rate with adaptive precision: two decimals when it is not a
 * whole number, no decimals when it is, and a "k" suffix for multiples of
 * 1000 (v holds the rate in hundredths for the divisibility tests). */
3045 static void print_fps(double d, const char *postfix){
3046 uint64_t v= lrintf(d*100);
3047 if     (v% 100      ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3048 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3049 else                  av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/* Log every metadata tag except "language" under a "Metadata:" heading.
 * Skips the heading entirely when the dictionary is empty or contains
 * only a "language" entry. */
3052 static void dump_metadata(void *ctx, AVMetadata *m, const char *indent)
3054 if(m && !(m->count == 1 && av_metadata_get(m, "language", NULL, 0))){
3055 AVMetadataTag *tag=NULL;
3057 av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3058 while((tag=av_metadata_get(m, "", tag, AV_METADATA_IGNORE_SUFFIX))) {
3059 if(strcmp("language", tag->key))
3060 av_log(ctx, AV_LOG_INFO, "%s  %-16s: %s\n", indent, tag->key, tag->value);
3065 /* "user interface" functions */
/**
 * Log a one-line human-readable description of stream i of 'ic': codec
 * string, id (for muxers with AVFMT_SHOW_IDS), language, aspect ratios,
 * frame-rate/time-base info for video, followed by the stream metadata.
 */
3066 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3069 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3070 AVStream *st = ic->streams[i];
/* reduce the time base for the debug-level display below */
3071 int g = av_gcd(st->time_base.num, st->time_base.den);
3072 AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
3073 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3074 av_log(NULL, AV_LOG_INFO, "    Stream #%d.%d", index, i);
3075 /* the pid is an important information, so we display it */
3076 /* XXX: add a generic system */
3077 if (flags & AVFMT_SHOW_IDS)
3078 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3080 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3081 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3082 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* only show PAR/DAR when it differs from the codec-layer value */
3083 if (st->sample_aspect_ratio.num && // default
3084 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3085 AVRational display_aspect_ratio;
3086 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3087 st->codec->width*st->sample_aspect_ratio.num,
3088 st->codec->height*st->sample_aspect_ratio.den,
3090 av_log(NULL, AV_LOG_INFO, ", PAR %d:%d DAR %d:%d",
3091 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3092 display_aspect_ratio.num, display_aspect_ratio.den);
/* video: report average fps, real base fps, and both time bases */
3094 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3095 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3096 print_fps(av_q2d(st->avg_frame_rate), "fps");
3097 if(st->r_frame_rate.den && st->r_frame_rate.num)
3098 print_fps(av_q2d(st->r_frame_rate), "tbr");
3099 if(st->time_base.den && st->time_base.num)
3100 print_fps(1/av_q2d(st->time_base), "tbn");
3101 if(st->codec->time_base.den && st->codec->time_base.num)
3102 print_fps(1/av_q2d(st->codec->time_base), "tbc");
3104 av_log(NULL, AV_LOG_INFO, "\n");
3105 dump_metadata(NULL, st->metadata, "    ");
/**
 * Log a human-readable summary of the whole (de)muxing context: container
 * name, duration, start time, bitrate, chapters, programs and streams.
 * Streams that belong to a program are printed under it; 'printed' tracks
 * which streams were already shown so the trailing loop only prints the
 * rest.
 * NOTE(review): several declarations and closing braces are missing from
 * this extraction.
 */
3108 void dump_format(AVFormatContext *ic,
3114 uint8_t *printed = av_mallocz(ic->nb_streams);
3115 if (ic->nb_streams && !printed)
3118 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3119 is_output ? "Output" : "Input",
3121 is_output ? ic->oformat->name : ic->iformat->name,
3122 is_output ? "to" : "from", url);
3123 dump_metadata(NULL, ic->metadata, "  ");
3125 av_log(NULL, AV_LOG_INFO, "  Duration: ");
3126 if (ic->duration != AV_NOPTS_VALUE) {
3127 int hours, mins, secs, us;
/* split AV_TIME_BASE-units duration into h:m:s.centiseconds */
3128 secs = ic->duration / AV_TIME_BASE;
3129 us = ic->duration % AV_TIME_BASE;
3134 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3135 (100 * us) / AV_TIME_BASE);
3137 av_log(NULL, AV_LOG_INFO, "N/A");
3139 if (ic->start_time != AV_NOPTS_VALUE) {
3141 av_log(NULL, AV_LOG_INFO, ", start: ");
3142 secs = ic->start_time / AV_TIME_BASE;
3143 us = abs(ic->start_time % AV_TIME_BASE);
3144 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3145 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3147 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3149 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3151 av_log(NULL, AV_LOG_INFO, "N/A");
3153 av_log(NULL, AV_LOG_INFO, "\n");
/* chapters, with start/end converted through their own time base */
3155 for (i = 0; i < ic->nb_chapters; i++) {
3156 AVChapter *ch = ic->chapters[i];
3157 av_log(NULL, AV_LOG_INFO, "    Chapter #%d.%d: ", index, i);
3158 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3159 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3161 dump_metadata(NULL, ch->metadata, "    ");
3163 if(ic->nb_programs) {
3164 int j, k, total = 0;
3165 for(j=0; j<ic->nb_programs; j++) {
3166 AVMetadataTag *name = av_metadata_get(ic->programs[j]->metadata,
3168 av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
3169 name ? name->value : "");
3170 dump_metadata(NULL, ic->programs[j]->metadata, "    ");
3171 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3172 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3173 printed[ic->programs[j]->stream_index[k]] = 1;
3175 total += ic->programs[j]->nb_stream_indexes;
3177 if (total < ic->nb_streams)
3178 av_log(NULL, AV_LOG_INFO, "  No Program\n");
/* streams not claimed by any program */
3180 for(i=0;i<ic->nb_streams;i++)
3182 dump_stream_format(ic, i, index, is_output);
3187 #if LIBAVFORMAT_VERSION_MAJOR < 53
3188 #include "libavcore/parseutils.h"
/* Deprecated pre-53 wrapper: forwards to av_parse_video_size(). */
3190 int parse_image_size(int *width_ptr, int *height_ptr, const char *str)
3192 return av_parse_video_size(width_ptr, height_ptr, str);
/* Deprecated pre-53 wrapper: parses 'arg' with av_parse_video_rate() and
 * splits the resulting rational into separate num/den out-parameters. */
3195 int parse_frame_rate(int *frame_rate_num, int *frame_rate_den, const char *arg)
3197 AVRational frame_rate;
3198 int ret = av_parse_video_rate(&frame_rate, arg);
3199 *frame_rate_num= frame_rate.num;
3200 *frame_rate_den= frame_rate.den;
3205 int64_t av_gettime(void)
3208 gettimeofday(&tv,NULL);
3209 return (int64_t)tv.tv_sec * 1000000 + tv.tv_usec;
/* Current time on the NTP timescale: the Unix-epoch microsecond clock
 * truncated to millisecond precision, shifted by NTP_OFFSET_US
 * (presumably the 1900->1970 epoch difference in microseconds — confirm
 * against the macro's definition). */
3212 uint64_t ff_ntp_time(void)
3214 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/**
 * Parse 'datestr' either as an absolute date/time (returning microseconds
 * since the Unix epoch; "now" and a trailing 'Z'/'z' for UTC are
 * supported) or, when 'duration' is nonzero, as a time span in
 * HH:MM:SS[.m...] or plain seconds form (returning microseconds).
 * NOTE(review): large parts of this function (format tables, conversion
 * and error paths, closing braces) are missing from this extraction.
 */
3217 int64_t parse_date(const char *datestr, int duration)
3223 static const char * const date_fmt[] = {
3227 static const char * const time_fmt[] = {
3237 time_t now = time(0);
3239 len = strlen(datestr);
3241 lastch = datestr[len - 1];
/* a trailing Z/z marks the timestamp as UTC */
3244 is_utc = (lastch == 'z' || lastch == 'Z');
3246 memset(&dt, 0, sizeof(dt));
3251 if (!strncasecmp(datestr, "now", len))
3252 return (int64_t) now * 1000000;
3254 /* parse the year-month-day part */
3255 for (i = 0; i < FF_ARRAY_ELEMS(date_fmt); i++) {
3256 q = small_strptime(p, date_fmt[i], &dt);
3262 /* if the year-month-day part is missing, then take the
3263 * current year-month-day time */
3268 dt = *localtime(&now);
3270 dt.tm_hour = dt.tm_min = dt.tm_sec = 0;
/* optional date/time separator */
3275 if (*p == 'T' || *p == 't' || *p == ' ')
3278 /* parse the hour-minute-second part */
3279 for (i = 0; i < FF_ARRAY_ELEMS(time_fmt); i++) {
3280 q = small_strptime(p, time_fmt[i], &dt);
3286 /* parse datestr as a duration */
3291 /* parse datestr as HH:MM:SS */
3292 q = small_strptime(p, time_fmt[0], &dt);
3294 /* parse datestr as S+ */
3295 dt.tm_sec = strtol(p, (char **)&q, 10);
3297 /* the parsing didn't succeed */
3304 /* Now we have all the fields that we can get */
3310 t = dt.tm_hour * 3600 + dt.tm_min * 60 + dt.tm_sec;
3312 dt.tm_isdst = -1;       /* unknown */
3322 /* parse the .m... part */
/* up to six fractional digits -> microseconds */
3326 for (val = 0, n = 100000; n >= 1; n /= 10, q++) {
3329 val += n * (*q - '0');
3333 return negative ? -t : t;
/**
 * Scan a '&'-separated "tag1=value&tag2=value" info string for 'tag1' and,
 * when found, copy its value into 'arg' (at most arg_size-1 characters).
 * Returns nonzero on a match (exact return paths are missing from this
 * extraction).
 */
3336 int find_info_tag(char *arg, int arg_size, const char *tag1, const char *info)
/* copy the tag name up to '=', '&' or end of string */
3346 while (*p != '\0' && *p != '=' && *p != '&') {
3347 if ((q - tag) < sizeof(tag) - 1)
/* copy the value up to the next '&', bounded by arg_size */
3355 while (*p != '&' && *p != '\0') {
3356 if ((q - arg) < arg_size - 1) {
3366 if (!strcmp(tag, tag1))
/**
 * Expand the first "%[0-9]*d" sequence in 'path' with 'number' (zero-padded
 * to the given width) and write the result to 'buf'. Fails when the
 * pattern is absent or the result would not fit in buf_size.
 * NOTE(review): the main scanning loop's structure is partly missing from
 * this extraction.
 */
3375 int av_get_frame_filename(char *buf, int buf_size,
3376 const char *path, int number)
3379 char *q, buf1[20], c;
3380 int nd, len, percentd_found;
/* accumulate the zero-pad width digits following '%' */
3392 while (isdigit(*p)) {
3393 nd = nd * 10 + *p++ - '0';
3396 } while (isdigit(c));
/* format the number and splice it into the output */
3405 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3407 if ((q - buf + len) > buf_size - 1)
3409 memcpy(q, buf1, len);
3417 if ((q - buf) < buf_size - 1)
3421 if (!percentd_found)
/* Dump 'size' bytes of 'buf' as 16-byte rows of hex values followed by
 * their printable-ASCII rendering (non-printables shown as a substitute).
 * Output goes to FILE 'f' when non-NULL, otherwise to av_log() at 'level'
 * on context 'avcl' — see the PRINT macro. */
3430 static void hex_dump_internal(void *avcl, FILE *f, int level, uint8_t *buf, int size)
3434 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3436 for(i=0;i<size;i+=16) {
3443 PRINT(" %02x", buf[i+j]);
/* ASCII column: replace non-printable bytes */
3448 for(j=0;j<len;j++) {
3450 if (c < ' ' || c > '~')
/* Public wrapper: hex-dump 'buf' to the given stdio stream. */
3459 void av_hex_dump(FILE *f, uint8_t *buf, int size)
3461 hex_dump_internal(NULL, f, 0, buf, size);
/* Public wrapper: hex-dump 'buf' through av_log() at the given level. */
3464 void av_hex_dump_log(void *avcl, int level, uint8_t *buf, int size)
3466 hex_dump_internal(avcl, NULL, level, buf, size);
3469 //FIXME needs to know the time_base
/* Dump an AVPacket's header fields (stream index, keyframe flag, duration,
 * dts, pts — the timestamps are scaled by AV_TIME_BASE, see FIXME above)
 * and optionally hex-dump its payload. Output goes to FILE 'f' when
 * non-NULL, otherwise to av_log() at 'level'. */
3470 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload)
3473 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3474 PRINT("stream #%d:\n", pkt->stream_index);
3475 PRINT("  keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3476 PRINT("  duration=%0.3f\n", (double)pkt->duration / AV_TIME_BASE);
3477 /* DTS is _always_ valid after av_read_frame() */
3479 if (pkt->dts == AV_NOPTS_VALUE)
3482 PRINT("%0.3f", (double)pkt->dts / AV_TIME_BASE);
3483 /* PTS may not be known if B-frames are present. */
3485 if (pkt->pts == AV_NOPTS_VALUE)
3488 PRINT("%0.3f", (double)pkt->pts / AV_TIME_BASE);
3490 PRINT("  size=%d\n", pkt->size);
3493 av_hex_dump(f, pkt->data, pkt->size);
/* Public wrapper: dump a packet to the given stdio stream. */
3496 void av_pkt_dump(FILE *f, AVPacket *pkt, int dump_payload)
3498 pkt_dump_internal(NULL, f, 0, pkt, dump_payload);
/* Public wrapper: dump a packet through av_log() at the given level. */
3501 void av_pkt_dump_log(void *avcl, int level, AVPacket *pkt, int dump_payload)
3503 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload);
3506 #if LIBAVFORMAT_VERSION_MAJOR < 53
/* Deprecated pre-53 alias: forwards all arguments to av_url_split().
 * NOTE(review): some parameters (port, query/fragment) are missing from
 * this extraction of the signature and call. */
3507 attribute_deprecated
3508 void ff_url_split(char *proto, int proto_size,
3509 char *authorization, int authorization_size,
3510 char *hostname, int hostname_size,
3512 char *path, int path_size,
3515 av_url_split(proto, proto_size,
3516 authorization, authorization_size,
3517 hostname, hostname_size,
/**
 * Split a URL into its components: protocol, user authorization, hostname,
 * port and path. Each output buffer is zeroed up front so missing
 * components come back empty; *port_ptr is set to -1 when no port is
 * present. Bracketed IPv6 literals ("[...]") are handled explicitly.
 */
3524 void av_url_split(char *proto, int proto_size,
3525 char *authorization, int authorization_size,
3526 char *hostname, int hostname_size,
3528 char *path, int path_size,
3531 const char *p, *ls, *at, *col, *brk;
/* default every component to "absent" */
3533 if (port_ptr)               *port_ptr = -1;
3534 if (proto_size > 0)         proto[0] = 0;
3535 if (authorization_size > 0) authorization[0] = 0;
3536 if (hostname_size > 0)      hostname[0] = 0;
3537 if (path_size > 0)          path[0] = 0;
3539 /* parse protocol */
3540 if ((p = strchr(url, ':'))) {
3541 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3546 /* no protocol means plain filename */
3547 av_strlcpy(path, url, path_size);
3551 /* separate path from hostname */
3552 ls = strchr(p, '/');
3554 ls = strchr(p, '?');
3556 av_strlcpy(path, ls, path_size);
3558 ls = &p[strlen(p)]; // XXX
3560 /* the rest is hostname, use that to parse auth/port */
3562 /* authorization (user[:pass]@hostname) */
3563 if ((at = strchr(p, '@')) && at < ls) {
3564 av_strlcpy(authorization, p,
3565 FFMIN(authorization_size, at + 1 - p));
3566 p = at + 1; /* skip '@' */
/* "[host]" form: numeric IPv6 address, optionally followed by ":port" */
3569 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3571 av_strlcpy(hostname, p + 1,
3572 FFMIN(hostname_size, brk - p));
3573 if (brk[1] == ':' && port_ptr)
3574 *port_ptr = atoi(brk + 2);
3575 } else if ((col = strchr(p, ':')) && col < ls) {
3576 av_strlcpy(hostname, p,
3577 FFMIN(col + 1 - p, hostname_size));
3578 if (port_ptr) *port_ptr = atoi(col + 1);
/* bare hostname, no port */
3580 av_strlcpy(hostname, p,
3581 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of the s bytes at src into buff
 * (exactly 2*s characters, NOT NUL-terminated) and return buff.
 * 'lowercase' selects 'a'-'f' digits instead of 'A'-'F'.
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    static const char digits_uc[] = "0123456789ABCDEF";
    static const char digits_lc[] = "0123456789abcdef";
    const char *digits = lowercase ? digits_lc : digits_uc;
    int i;

    for (i = 0; i < s; i++) {
        buff[2 * i]     = digits[src[i] >> 4];
        buff[2 * i + 1] = digits[src[i] & 0x0F];
    }
    return buff;
}
/**
 * Decode a whitespace-tolerant hexadecimal string 'p' into 'data'.
 * NOTE(review): most of the function (the main loop, byte assembly and
 * the returned length) is missing from this extraction; presumably it
 * returns the number of decoded bytes — confirm against the header.
 */
3606 int ff_hex_to_data(uint8_t *data, const char *p)
3613 p += strspn(p, SPACE_CHARS);
/* map '0'-'9' and 'A'-'F' (after toupper) to their nibble values */
3616 c = toupper((unsigned char) *p++);
3617 if (c >= '0' && c <= '9')
3619 else if (c >= 'A' && c <= 'F')
/**
 * Set a stream's timestamp wrap bits and time base. The pts_num/pts_den
 * fraction is reduced via av_reduce() (bounded by INT_MAX); common factors
 * and over-large time bases are reported, and a degenerate result zeroes
 * both fields.
 */
3634 void av_set_pts_info(AVStream *s, int pts_wrap_bits,
3635 unsigned int pts_num, unsigned int pts_den)
3637 s->pts_wrap_bits = pts_wrap_bits;
/* av_reduce() returns nonzero when the reduction was exact */
3639 if(av_reduce(&s->time_base.num, &s->time_base.den, pts_num, pts_den, INT_MAX)){
3640 if(s->time_base.num != pts_num)
3641 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/s->time_base.num);
3643 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3645 if(!s->time_base.num || !s->time_base.den)
3646 s->time_base.num= s->time_base.den= 0;
/**
 * Assemble a URL string "proto://[auth@]host[:port]<fmt...>" into 'str'
 * (bounded by 'size'). Numeric IPv6 hostnames are wrapped in brackets when
 * networking support is compiled in; the trailing portion is produced from
 * the printf-style 'fmt' varargs. Returns the resulting string length
 * (return statement missing from this extraction).
 */
3649 int ff_url_join(char *str, int size, const char *proto,
3650 const char *authorization, const char *hostname,
3651 int port, const char *fmt, ...)
3654 struct addrinfo hints, *ai;
3659 av_strlcatf(str, size, "%s://", proto);
3660 if (authorization && authorization[0])
3661 av_strlcatf(str, size, "%s@", authorization);
3662 #if CONFIG_NETWORK && defined(AF_INET6)
3663 /* Determine if hostname is a numerical IPv6 address,
3664 * properly escape it within [] in that case. */
3665 memset(&hints, 0, sizeof(hints));
3666 hints.ai_flags = AI_NUMERICHOST;
3667 if (!getaddrinfo(hostname, NULL, &hints, &ai)) {
3668 if (ai->ai_family == AF_INET6) {
3669 av_strlcat(str, "[", size);
3670 av_strlcat(str, hostname, size);
3671 av_strlcat(str, "]", size);
3673 av_strlcat(str, hostname, size);
3678 /* Not an IPv6 address, just output the plain string. */
3679 av_strlcat(str, hostname, size);
/* optional ":port" suffix */
3682 av_strlcatf(str, size, ":%d", port);
/* append the formatted tail (path, query, ...) */
3685 int len = strlen(str);
3688 vsnprintf(str + len, size > len ? size - len : 0, fmt, vl);
/**
 * Forward a packet from a source (de)muxing context into a chained muxer:
 * copy the packet, retarget it to 'dst_stream', rescale pts/dts from the
 * source stream's time base into the destination stream's, and write it
 * with av_write_frame(). Returns av_write_frame()'s result.
 */
3694 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
3695 AVFormatContext *src)
3700 local_pkt.stream_index = dst_stream;
3701 if (pkt->pts != AV_NOPTS_VALUE)
3702 local_pkt.pts = av_rescale_q(pkt->pts,
3703 src->streams[pkt->stream_index]->time_base,
3704 dst->streams[dst_stream]->time_base);
3705 if (pkt->dts != AV_NOPTS_VALUE)
3706 local_pkt.dts = av_rescale_q(pkt->dts,
3707 src->streams[pkt->stream_index]->time_base,
3708 dst->streams[dst_stream]->time_base);
3709 return av_write_frame(dst, &local_pkt);
3712 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3715 const char *ptr = str;
3717 /* Parse key=value pairs. */
3720 char *dest = NULL, *dest_end;
3721 int key_len, dest_len = 0;
3723 /* Skip whitespace and potential commas. */
3724 while (*ptr && (isspace(*ptr) || *ptr == ','))
3731 if (!(ptr = strchr(key, '=')))
3734 key_len = ptr - key;
3736 callback_get_buf(context, key, key_len, &dest, &dest_len);
3737 dest_end = dest + dest_len - 1;
3741 while (*ptr && *ptr != '\"') {
3745 if (dest && dest < dest_end)
3749 if (dest && dest < dest_end)
3757 for (; *ptr && !(isspace(*ptr) || *ptr == ','); ptr++)
3758 if (dest && dest < dest_end)