
Merge remote-tracking branch 'qatar/master'
author     Michael Niedermayer <michaelni@gmx.at>
           Thu, 18 Aug 2011 08:20:25 +0000 (10:20 +0200)
committer  Michael Niedermayer <michaelni@gmx.at>
           Thu, 18 Aug 2011 08:56:08 +0000 (10:56 +0200)
* qatar/master: (23 commits)
  h264: hide reference frame errors unless requested
  swscale: split hScale() function pointer into h[cy]Scale().
  Move clipd macros to x86util.asm.
  avconv: reindent.
  avconv: rescue poor abused start_time global.
  avconv: rescue poor abused recording_time global.
  avconv: merge two loops in output_packet().
  avconv: fix broken indentation.
  avconv: get rid of the arbitrary MAX_FILES limit.
  avconv: get rid of the output_streams_for_file vs. ost_table schizophrenia
  avconv: add a wrapper for output AVFormatContexts and merge output_opts into it
  avconv: make itsscale syntax consistent with other options.
  avconv: factor out adding input streams.
  avconv: Factorize combining auto vsync with format.
  avconv: Factorize video resampling.
  avconv: Don't unnecessarily convert ipts to a double.
  ffmpeg: remove unsed variable nopts
  RV3/4 parser: remove unused variable 'off'
  add XMV demuxer
  rmdec: parse FPS in RealMedia properly
  ...

Conflicts:
	avconv.c
	libavformat/version.h
	libswscale/swscale.c
	tests/ref/fate/lmlm4-demux

Merged-by: Michael Niedermayer <michaelni@gmx.at>
17 files changed:
Changelog
avconv.c
doc/general.texi
ffmpeg.c
libavcodec/Makefile
libavcodec/allcodecs.c
libavcodec/h264_refs.c
libavcodec/x86/dsputil_yasm.asm
libavformat/Makefile
libavformat/allformats.c
libavformat/rmdec.c
libavformat/version.h
libavutil/x86/x86util.asm
libswscale/ppc/swscale_altivec.c
libswscale/swscale.c
libswscale/swscale_internal.h
libswscale/x86/swscale_template.c

diff --cc Changelog
+++ b/Changelog
@@@ -41,12 -39,35 +41,13 @@@ easier to use. The changes are
      * Presets in avconv are disabled, because only libx264 used them and
        presets for libx264 can now be specified using a private option
        '-preset <presetname>'.
+ - XMV demuxer
  
  
 -version 0.7:
 -
 -- E-AC-3 audio encoder
 -- ac3enc: add channel coupling support
 -- floating-point sample format support for (E-)AC-3, DCA, AAC, Vorbis decoders
 -- H.264/MPEG frame-level multithreading
 -- av_metadata_* functions renamed to av_dict_* and moved to libavutil
 -- 4:4:4 H.264 decoding support
 -- 10-bit H.264 optimizations for x86
 -- bump libswscale for recently reported ABI break
 +version 0.8:
  
  
 -version 0.7_beta2:
 -
 -- VP8 frame-level multithreading
 -- NEON optimizations for VP8
 -- removed a lot of deprecated API cruft
 -- FFT and IMDCT optimizations for AVX (Sandy Bridge) processors
 -- DPX image encoder
 -- SMPTE 302M AES3 audio decoder
 -- ffmpeg no longer quits after the 'q' key is pressed; use 'ctrl+c' instead
 -- 9bit and 10bit per sample support in the H.264 decoder
 -
 -
 -version 0.7_beta1:
 -
 +- many many things we forgot because we rather write code than changelogs
  - WebM support in Matroska de/muxer
  - low overhead Ogg muxing
  - MMS-TCP support
diff --cc avconv.c
+++ b/avconv.c
@@@ -109,15 -97,8 +109,9 @@@ typedef struct MetadataMap 
  
  static const OptionDef options[];
  
- #define MAX_FILES 100
 +#define MAX_STREAMS 1024    /* arbitrary sanity check value */
  static const char *last_asked_format = NULL;
- static double *ts_scale;
- static int  nb_ts_scale;
- static AVFormatContext *output_files[MAX_FILES];
- static AVDictionary *output_opts[MAX_FILES];
- static int nb_output_files = 0;
+ static AVDictionary *ts_scale;
  
  static StreamMap *stream_maps = NULL;
  static int nb_stream_maps;
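
The ts_scale change above is representative of the whole series: the fixed MAX_FILES-sized arrays go away and per-stream values move into an AVDictionary keyed by option name plus stream specifier. A minimal sketch of that store/lookup pattern, assuming hypothetical opt_ts_scale()/lookup_ts_scale() helpers and plain string comparison in place of check_stream_specifier() (which the real add_input_streams() further down uses):

    #include <stdlib.h>
    #include <string.h>
    #include <libavutil/dict.h>

    static AVDictionary *ts_scale = NULL;

    /* "-itsscale:v:0 2.0" gets stored as key "itsscale:v:0", value "2.0" */
    static int opt_ts_scale(const char *opt, const char *arg)
    {
        return av_dict_set(&ts_scale, opt, arg, 0);
    }

    /* per input stream: walk every stored entry and keep the value whose
     * stream specifier (the part after ':') applies to this stream */
    static double lookup_ts_scale(const char *spec_for_this_stream)
    {
        AVDictionaryEntry *e = NULL;
        double scale = 1.0;
        while ((e = av_dict_get(ts_scale, "", e, AV_DICT_IGNORE_SUFFIX))) {
            char *p = strchr(e->key, ':');
            /* avconv calls check_stream_specifier() here; strcmp stands in */
            if (!p || !strcmp(p + 1, spec_for_this_stream))
                scale = strtod(e->value, NULL);
        }
        return scale;
    }

The dictionary grows on demand, so the old nb_ts_scale bookkeeping and the 100-file ceiling disappear together.
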
@@@ -326,12 -301,14 +317,20 @@@ typedef struct InputFile 
      int64_t ts_offset;
  } InputFile;
  
 +#if HAVE_TERMIOS_H
 +
 +/* init terminal so that we can grab keys */
 +static struct termios oldtty;
 +#endif
 +
+ typedef struct OutputFile {
+     AVFormatContext *ctx;
+     AVDictionary *opts;
+     int ost_index;       /* index of the first stream in output_streams */
+     int64_t recording_time; /* desired length of the resulting file in microseconds */
+     int64_t start_time;     /* start time in microseconds */
+ } OutputFile;
  static InputStream *input_streams = NULL;
  static int         nb_input_streams = 0;
  static InputFile   *input_files   = NULL;
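
The new OutputFile struct bundles what used to live in parallel MAX_FILES arrays (output_files, output_opts) together with the previously global recording_time/start_time. A rough sketch of how such an array can be grown per output target; new_output_file() is an illustrative helper, the real code goes through avconv's grow_array():

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include <libavformat/avformat.h>
    #include <libavutil/mem.h>

    typedef struct OutputFile {
        AVFormatContext *ctx;
        AVDictionary    *opts;
        int              ost_index;      /* first stream of this file in output_streams */
        int64_t          recording_time; /* microseconds, INT64_MAX = unlimited */
        int64_t          start_time;     /* microseconds */
    } OutputFile;

    static OutputFile *output_files    = NULL;
    static int         nb_output_files = 0;

    /* append one output file; no fixed MAX_FILES ceiling any more */
    static OutputFile *new_output_file(AVFormatContext *oc, AVDictionary *opts,
                                       int first_ost_index)
    {
        OutputFile *of;
        output_files = av_realloc(output_files,
                                  (nb_output_files + 1) * sizeof(*output_files));
        if (!output_files)
            exit(1);
        of = &output_files[nb_output_files++];
        memset(of, 0, sizeof(*of));
        of->ctx            = oc;
        of->opts           = opts;
        of->ost_index      = first_ost_index;
        of->recording_time = INT64_MAX;
        return of;
    }
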
@@@ -1135,32 -1054,24 +1097,32 @@@ static void do_video_resample(OutputStr
                 ist->file_index, ist->st->index,
                 ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
                 dec->width         , dec->height         , av_get_pix_fmt_name(dec->pix_fmt));
 -        if(!ost->video_resample)
 -            ost->video_resample = 1;
 +        ost->resample_width   = dec->width;
 +        ost->resample_height  = dec->height;
 +        ost->resample_pix_fmt = dec->pix_fmt;
      }
  
 -#if !CONFIG_AVFILTER
 +    ost->video_resample = dec->width   != enc->width  ||
 +                          dec->height  != enc->height ||
 +                          dec->pix_fmt != enc->pix_fmt;
 +
      if (ost->video_resample) {
-         final_picture = &ost->resample_frame;
 -        *out_picture = &ost->pict_tmp;
 -        if (resample_changed) {
++        *out_picture = &ost->resample_frame;
 +        if (!ost->img_resample_ctx || resample_changed) {
 +            /* initialize the destination picture */
 +            if (!ost->resample_frame.data[0]) {
 +                avcodec_get_frame_defaults(&ost->resample_frame);
 +                if (avpicture_alloc((AVPicture *)&ost->resample_frame, enc->pix_fmt,
 +                                    enc->width, enc->height)) {
 +                    fprintf(stderr, "Cannot allocate temp picture, check pix fmt\n");
 +                    exit_program(1);
 +                }
 +            }
              /* initialize a new scaler context */
              sws_freeContext(ost->img_resample_ctx);
 -            ost->img_resample_ctx = sws_getContext(
 -                ist->st->codec->width,
 -                ist->st->codec->height,
 -                ist->st->codec->pix_fmt,
 -                ost->st->codec->width,
 -                ost->st->codec->height,
 -                ost->st->codec->pix_fmt,
 -                ost->sws_flags, NULL, NULL, NULL);
 +            ost->img_resample_ctx = sws_getContext(dec->width, dec->height, dec->pix_fmt,
 +                                                   enc->width, enc->height, enc->pix_fmt,
 +                                                   ost->sws_flags, NULL, NULL, NULL);
              if (ost->img_resample_ctx == NULL) {
                  fprintf(stderr, "Cannot get resampling context\n");
                  exit_program(1);
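
The factored-out do_video_resample() keeps one swscale context per output stream and rebuilds it whenever the decoded geometry or pixel format changes. Reduced to the libswscale calls only, the pattern is roughly the following sketch (not the avconv code itself; enum PixelFormat is the spelling of this era, newer trees use AVPixelFormat):

    #include <libswscale/swscale.h>

    /* Rebuild the scaler whenever the decoded geometry or pixel format changes;
     * sws_freeContext(NULL) is a no-op, so this also covers the first call. */
    static struct SwsContext *update_scaler(struct SwsContext *sws,
                                            int src_w, int src_h, enum PixelFormat src_fmt,
                                            int dst_w, int dst_h, enum PixelFormat dst_fmt,
                                            int sws_flags)
    {
        sws_freeContext(sws);
        sws = sws_getContext(src_w, src_h, src_fmt,
                             dst_w, dst_h, dst_fmt,
                             sws_flags, NULL, NULL, NULL);
        return sws; /* NULL means the conversion is unsupported; caller must abort */
    }

Each decoded frame is then pushed through sws_scale() into the preallocated resample_frame before encoding.
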
@@@ -1634,130 -1578,139 +1638,149 @@@ static int output_packet(InputStream *i
          }
          /* if output time reached then transcode raw format,
             encode packets and output them */
-         if (start_time == 0 || ist->pts >= start_time)
-             for(i=0;i<nb_ostreams;i++) {
-                 int frame_size;
+         for (i = 0; i < nb_ostreams; i++) {
+             OutputFile *of = &output_files[ost_table[i].file_index];
+             int frame_size;
+             ost = &ost_table[i];
+             if (ost->source_index != ist_index)
+                 continue;
+             if (of->start_time && ist->pts < of->start_time)
+                 continue;
+             if (of->recording_time != INT64_MAX &&
+                 av_compare_ts(ist->pts, AV_TIME_BASE_Q, of->recording_time + of->start_time,
+                               (AVRational){1, 1000000}) >= 0) {
+                 ost->is_past_recording_time = 1;
+                 continue;
+             }
  
-                 ost = ost_table[i];
-                 if (ost->source_index == ist_index) {
  #if CONFIG_AVFILTER
-                 frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
-                     !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
-                 while (frame_available) {
-                     if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
-                         AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
-                         if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
-                             goto cont;
-                         if (ost->picref) {
-                             avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
-                             ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
-                         }
+             if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
+                 ost->input_video_filter) {
 -                AVRational sar;
 -                if (ist->st->sample_aspect_ratio.num)
 -                    sar = ist->st->sample_aspect_ratio;
 -                else
 -                    sar = ist->st->codec->sample_aspect_ratio;
 -                av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, ist->pts, sar);
++                if (!picture.sample_aspect_ratio.num)
++                    picture.sample_aspect_ratio = ist->st->sample_aspect_ratio;
++                picture.pts = ist->pts;
++
++                av_vsrc_buffer_add_frame(ost->input_video_filter, &picture, AV_VSRC_BUF_FLAG_OVERWRITE);
+             }
+             frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
+                 !ost->output_video_filter || avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+             while (frame_available) {
 -                AVRational ist_pts_tb;
 -                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter)
 -                    get_filtered_video_frame(ost->output_video_filter, &picture, &ost->picref, &ist_pts_tb);
 -                if (ost->picref)
 -                    ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
++                if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && ost->output_video_filter) {
++                    AVRational ist_pts_tb = ost->output_video_filter->inputs[0]->time_base;
++                    if (av_vsink_buffer_get_video_buffer_ref(ost->output_video_filter, &ost->picref, 0) < 0)
++                        goto cont;
++                    if (ost->picref) {
++                        avfilter_fill_frame_from_video_buffer_ref(&picture, ost->picref);
++                        ist->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
 +                    }
++                }
  #endif
-                     os = output_files[ost->file_index];
+                 os = output_files[ost->file_index].ctx;
  
-                     /* set the input output pts pairs */
-                     //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
+                 /* set the input output pts pairs */
+                 //ost->sync_ipts = (double)(ist->pts + input_files[ist->file_index].ts_offset - start_time)/ AV_TIME_BASE;
  
-                     if (ost->encoding_needed) {
-                         av_assert0(ist->decoding_needed);
-                         switch(ost->st->codec->codec_type) {
-                         case AVMEDIA_TYPE_AUDIO:
-                             do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
-                             break;
-                         case AVMEDIA_TYPE_VIDEO:
+                 if (ost->encoding_needed) {
+                     av_assert0(ist->decoding_needed);
+                     switch(ost->st->codec->codec_type) {
+                     case AVMEDIA_TYPE_AUDIO:
+                         do_audio_out(os, ost, ist, decoded_data_buf, decoded_data_size);
+                         break;
+                     case AVMEDIA_TYPE_VIDEO:
  #if CONFIG_AVFILTER
-                             if (ost->picref->video && !ost->frame_aspect_ratio)
-                                 ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
+                         if (ost->picref->video && !ost->frame_aspect_ratio)
 -                            ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
++                            ost->st->codec->sample_aspect_ratio = ost->picref->video->sample_aspect_ratio;
  #endif
-                             do_video_out(os, ost, ist, &picture, &frame_size,
-                                          same_quant ? quality : ost->st->codec->global_quality);
-                             if (vstats_filename && frame_size)
-                                 do_video_stats(os, ost, frame_size);
-                             break;
-                         case AVMEDIA_TYPE_SUBTITLE:
-                             do_subtitle_out(os, ost, ist, &subtitle,
-                                             pkt->pts);
-                             break;
-                         default:
-                             abort();
-                         }
-                     } else {
-                         AVFrame avframe; //FIXME/XXX remove this
-                         AVPicture pict;
-                         AVPacket opkt;
-                         int64_t ost_tb_start_time= av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
-                         av_init_packet(&opkt);
+                         do_video_out(os, ost, ist, &picture, &frame_size,
 -                                     same_quant ? quality : ost->st->codec->global_quality);
++                                        same_quant ? quality : ost->st->codec->global_quality);
+                         if (vstats_filename && frame_size)
+                             do_video_stats(os, ost, frame_size);
+                         break;
+                     case AVMEDIA_TYPE_SUBTITLE:
+                         do_subtitle_out(os, ost, ist, &subtitle,
+                                         pkt->pts);
+                         break;
+                     default:
+                         abort();
+                     }
+                 } else {
+                     AVFrame avframe; //FIXME/XXX remove this
++                    AVPicture pict;
+                     AVPacket opkt;
+                     int64_t ost_tb_start_time= av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
 -
+                     av_init_packet(&opkt);
  
-                         if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
+                     if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) && !copy_initial_nonkeyframes)
  #if !CONFIG_AVFILTER
-                             continue;
+                         continue;
  #else
-                             goto cont;
+                         goto cont;
  #endif
  
-                         /* no reencoding needed : output the packet directly */
-                         /* force the input stream PTS */
+                     /* no reencoding needed : output the packet directly */
+                     /* force the input stream PTS */
  
-                         avcodec_get_frame_defaults(&avframe);
-                         ost->st->codec->coded_frame= &avframe;
-                         avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
+                     avcodec_get_frame_defaults(&avframe);
+                     ost->st->codec->coded_frame= &avframe;
+                     avframe.key_frame = pkt->flags & AV_PKT_FLAG_KEY;
  
-                         if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
-                             audio_size += data_size;
-                         else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
-                             video_size += data_size;
-                             ost->sync_opts++;
-                         }
+                     if(ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
+                         audio_size += data_size;
+                     else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
+                         video_size += data_size;
+                         ost->sync_opts++;
+                     }
  
-                         opkt.stream_index= ost->index;
-                         if(pkt->pts != AV_NOPTS_VALUE)
-                             opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
-                         else
-                             opkt.pts= AV_NOPTS_VALUE;
-                         if (pkt->dts == AV_NOPTS_VALUE)
-                             opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
-                         else
-                             opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
-                         opkt.dts -= ost_tb_start_time;
-                         opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
-                         opkt.flags= pkt->flags;
-                         //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
-                         if(   ost->st->codec->codec_id != CODEC_ID_H264
-                            && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
-                            && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
-                            ) {
-                             if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
-                                 opkt.destruct= av_destruct_packet;
-                         } else {
-                             opkt.data = data_buf;
-                             opkt.size = data_size;
-                         }
+                     opkt.stream_index= ost->index;
+                     if(pkt->pts != AV_NOPTS_VALUE)
+                         opkt.pts= av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
+                     else
+                         opkt.pts= AV_NOPTS_VALUE;
+                     if (pkt->dts == AV_NOPTS_VALUE)
+                         opkt.dts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->st->time_base);
+                     else
+                         opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
+                     opkt.dts -= ost_tb_start_time;
+                     opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
+                     opkt.flags= pkt->flags;
+                     //FIXME remove the following 2 lines they shall be replaced by the bitstream filters
+                     if(   ost->st->codec->codec_id != CODEC_ID_H264
+                        && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
+                        && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
+                        ) {
+                         if(av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, data_buf, data_size, pkt->flags & AV_PKT_FLAG_KEY))
+                             opkt.destruct= av_destruct_packet;
+                     } else {
+                         opkt.data = data_buf;
+                         opkt.size = data_size;
+                     }
  
-                         if (os->oformat->flags & AVFMT_RAWPICTURE) {
-                             /* store AVPicture in AVPacket, as expected by the output format */
-                             avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
-                             opkt.data = (uint8_t *)&pict;
-                             opkt.size = sizeof(AVPicture);
-                             opkt.flags |= AV_PKT_FLAG_KEY;
-                         }
-                         write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
-                         ost->st->codec->frame_number++;
-                         ost->frame_number++;
-                         av_free_packet(&opkt);
++                    if (os->oformat->flags & AVFMT_RAWPICTURE) {
++                        /* store AVPicture in AVPacket, as expected by the output format */
++                        avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
++                        opkt.data = (uint8_t *)&pict;
++                        opkt.size = sizeof(AVPicture);
++                        opkt.flags |= AV_PKT_FLAG_KEY;
 +                    }
+                     write_frame(os, &opkt, ost->st->codec, ost->bitstream_filters);
+                     ost->st->codec->frame_number++;
+                     ost->frame_number++;
+                     av_free_packet(&opkt);
+                 }
  #if CONFIG_AVFILTER
-                     cont:
-                     frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
-                                        ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+                 cont:
+                 frame_available = (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) &&
+                                    ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
+                 if (ost->picref)
                      avfilter_unref_buffer(ost->picref);
-                 }
+             }
  #endif
-                 }
              }
  
          av_free(buffer_to_free);
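
The -t/-ss handling is now evaluated per output file inside output_packet(): of->start_time skips early packets and of->recording_time marks a stream as past its window. The predicate is a single av_compare_ts() across two time bases; a self-contained sketch:

    #include <stdint.h>
    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    /* Return 1 once the input pts (in AV_TIME_BASE units) has passed the end
     * of this output file's recording window, 0 otherwise. */
    static int past_recording_time(int64_t ist_pts,
                                   int64_t start_time,      /* microseconds */
                                   int64_t recording_time)  /* microseconds, INT64_MAX = none */
    {
        if (recording_time == INT64_MAX)
            return 0;
        return av_compare_ts(ist_pts, AV_TIME_BASE_Q,
                             recording_time + start_time,
                             (AVRational){ 1, 1000000 }) >= 0;
    }

Since ist->pts is kept in AV_TIME_BASE units and the limit is given in microseconds, the two time bases here are actually equal; av_compare_ts() simply keeps the comparison robust if that ever changes.
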
@@@ -1873,20 -1834,19 +1904,22 @@@ static int transcode(OutputFile *output
                       InputFile *input_files,
                       int nb_input_files)
  {
-     int ret = 0, i, j, k, n, nb_ostreams = 0, step;
 -    int ret = 0, i, j;
++    int ret = 0, i, j, step;
      AVFormatContext *is, *os;
      AVCodecContext *codec, *icodec;
-     OutputStream *ost, **ost_table = NULL;
+     OutputStream *ost;
      InputStream *ist;
      char error[1024];
 +    int key;
      int want_sdp = 1;
-     uint8_t no_packet[MAX_FILES]={0};
+     uint8_t *no_packet;
      int no_packet_count=0;
 +    int nb_frame_threshold[AVMEDIA_TYPE_NB]={0};
 +    int nb_streams[AVMEDIA_TYPE_NB]={0};
  
+     if (!(no_packet = av_mallocz(nb_input_files)))
+         exit_program(1);
      if (rate_emu)
          for (i = 0; i < nb_input_streams; i++)
              input_streams[i].start = av_gettime();
                  ret = AVERROR(EINVAL);
                  goto dump_format;
              }
 -            /* update requested sample format for the decoder based on the
 -               corresponding encoder sample format */
 -            for (j = 0; j < nb_output_streams; j++) {
 -                ost = &output_streams[j];
 -                if (ost->source_index == i) {
 -                    update_sample_fmt(ist->st->codec, codec, ost->st->codec);
 -                    break;
 -                }
 -            }
 -
              if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
                  snprintf(error, sizeof(error), "Error while opening decoder for input stream #%d.%d",
                          ist->file_index, ist->st->index);
              ret = AVERROR(EINVAL);
              goto dump_format;
          }
- //        assert_avoptions(output_opts[i]);
-         if (strcmp(output_files[i]->oformat->name, "rtp")) {
 -        assert_avoptions(output_files[i].opts);
++//        assert_avoptions(output_files[i].opts);
+         if (strcmp(os->oformat->name, "rtp")) {
              want_sdp = 0;
          }
      }
          double opts_min;
  
      redo:
-         ipts_min= 1e100;
+         ipts_min = INT64_MAX;
          opts_min= 1e100;
-                 for(i=0;i<nb_ostreams;i++) {
-                     ost = ost_table[i];
 +        /* if 'q' pressed, exits */
 +        if (!using_stdin) {
 +            if (q_pressed)
 +                break;
 +            /* read_key() returns 0 on EOF */
 +            key = read_key();
 +            if (key == 'q')
 +                break;
 +            if (key == '+') verbose++;
 +            if (key == '-') verbose--;
 +            if (key == 's') qp_hist     ^= 1;
 +            if (key == 'h'){
 +                if (do_hex_dump){
 +                    do_hex_dump = do_pkt_dump = 0;
 +                } else if(do_pkt_dump){
 +                    do_hex_dump = 1;
 +                } else
 +                    do_pkt_dump = 1;
 +                av_log_set_level(AV_LOG_DEBUG);
 +            }
 +            if (key == 'd' || key == 'D'){
 +                int debug=0;
 +                if(key == 'D') {
 +                    debug = input_streams[0].st->codec->debug<<1;
 +                    if(!debug) debug = 1;
 +                    while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
 +                        debug += debug;
 +                }else
 +                    scanf("%d", &debug);
 +                for(i=0;i<nb_input_streams;i++) {
 +                    input_streams[i].st->codec->debug = debug;
 +                }
++                for(i=0;i<nb_output_streams;i++) {
++                    ost = &output_streams[i];
 +                    ost->st->codec->debug = debug;
 +                }
 +                if(debug) av_log_set_level(AV_LOG_DEBUG);
 +                fprintf(stderr,"debug=%d\n", debug);
 +            }
 +            if (key == '?'){
 +                fprintf(stderr, "key    function\n"
 +                                "?      show this help\n"
 +                                "+      increase verbosity\n"
 +                                "-      decrease verbosity\n"
 +                                "D      cycle through available debug modes\n"
 +                                "h      dump packets/hex press to cycle through the 3 states\n"
 +                                "q      quit\n"
 +                                "s      Show QP histogram\n"
 +                );
 +            }
 +        }
  
          /* select the stream that we must read now by looking at the
             smallest output pts */
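
The keyboard handling merged in here ('q', '+', '-', 'd'/'D', 'h', 's', '?') relies on read_key() polling stdin without blocking, which in turn needs the terminal switched out of canonical mode at startup (the HAVE_TERMIOS_H/oldtty hunk earlier in this file). A condensed POSIX-only sketch of that machinery; signal handling, EOF detection and the non-termios fallback are omitted:

    #include <termios.h>
    #include <unistd.h>
    #include <sys/select.h>
    #include <sys/time.h>

    static struct termios oldtty;

    static void term_init(void)
    {
        struct termios tty;
        tcgetattr(0, &tty);
        oldtty = tty;                     /* saved so term_exit() can restore it */
        tty.c_lflag &= ~(ICANON | ECHO);  /* no line buffering, no echo */
        tty.c_cc[VMIN]  = 1;
        tty.c_cc[VTIME] = 0;
        tcsetattr(0, TCSANOW, &tty);
    }

    static int read_key(void)
    {
        unsigned char ch;
        struct timeval tv = { 0, 0 };     /* poll, never block the transcode loop */
        fd_set rfds;

        FD_ZERO(&rfds);
        FD_SET(0, &rfds);
        if (select(1, &rfds, NULL, NULL, &tv) > 0 && read(0, &ch, 1) == 1)
            return ch;
        return -1;
    }
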
@@@ -2998,6 -2847,88 +2958,92 @@@ static AVCodec *choose_codec(AVFormatCo
      return NULL;
  }
  
 -                dec->height >>= dec->lowres;
 -                dec->width  >>= dec->lowres;
+ /**
+  * Add all the streams from the given input file to the global
+  * list of input streams.
+  */
+ static void add_input_streams(AVFormatContext *ic)
+ {
+     int i, rfps, rfps_base, ret;
+     for (i = 0; i < ic->nb_streams; i++) {
+         AVStream *st = ic->streams[i];
+         AVCodecContext *dec = st->codec;
+         AVDictionaryEntry *e = NULL;
+         InputStream *ist;
+         char *scale = NULL;
+         dec->thread_count = thread_count;
+         input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
+         ist = &input_streams[nb_input_streams - 1];
+         ist->st = st;
+         ist->file_index = nb_input_files;
+         ist->discard = 1;
+         ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);
+         while (e = av_dict_get(ts_scale, "", e, AV_DICT_IGNORE_SUFFIX)) {
+             char *p = strchr(e->key, ':');
+             if ((ret = check_stream_specifier(ic, st, p ? p + 1 : "")) > 0)
+                 scale = e->value;
+             else if (ret < 0)
+                 exit_program(1);
+         }
+         if (scale)
+             ist->ts_scale = strtod(scale, NULL);
+         ist->dec = choose_codec(ic, st, dec->codec_type, codec_names);
+         switch (dec->codec_type) {
+         case AVMEDIA_TYPE_AUDIO:
++            if(!ist->dec)
++                ist->dec = avcodec_find_decoder(dec->codec_id);
+             if(audio_disable)
+                 st->discard= AVDISCARD_ALL;
+             break;
+         case AVMEDIA_TYPE_VIDEO:
++            if(!ist->dec)
++                ist->dec = avcodec_find_decoder(dec->codec_id);
+             rfps      = ic->streams[i]->r_frame_rate.num;
+             rfps_base = ic->streams[i]->r_frame_rate.den;
+             if (dec->lowres) {
+                 dec->flags |= CODEC_FLAG_EMU_EDGE;
+             }
+             if(me_threshold)
+                 dec->debug |= FF_DEBUG_MV;
+             if (dec->time_base.den != rfps*dec->ticks_per_frame || dec->time_base.num != rfps_base) {
+                 if (verbose >= 0)
+                     fprintf(stderr,"\nSeems stream %d codec frame rate differs from container frame rate: %2.2f (%d/%d) -> %2.2f (%d/%d)\n",
+                             i, (float)dec->time_base.den / dec->time_base.num, dec->time_base.den, dec->time_base.num,
+                     (float)rfps / rfps_base, rfps, rfps_base);
+             }
+             if(video_disable)
+                 st->discard= AVDISCARD_ALL;
+             else if(video_discard)
+                 st->discard= video_discard;
+             break;
+         case AVMEDIA_TYPE_DATA:
+             break;
+         case AVMEDIA_TYPE_SUBTITLE:
++            if(!ist->dec)
++                ist->dec = avcodec_find_decoder(dec->codec_id);
+             if(subtitle_disable)
+                 st->discard = AVDISCARD_ALL;
+             break;
+         case AVMEDIA_TYPE_ATTACHMENT:
+         case AVMEDIA_TYPE_UNKNOWN:
+             break;
+         default:
+             abort();
+         }
+     }
+ }
  static int opt_input_file(const char *opt, const char *filename)
  {
      AVFormatContext *ic;
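
add_input_streams() resolves each ts_scale entry through check_stream_specifier(), which is not part of this diff. As a rough illustration of what a specifier match involves, here is a much-simplified stand-in that only understands a bare index or 'v'/'a'/'s' optionally followed by ':index'; the real helper handles more forms and reports errors as negative values:

    #include <stdlib.h>
    #include <string.h>
    #include <libavformat/avformat.h>

    /* Return 1 if stream st of ic matches spec ("", "2", "v", "a:1", ...). */
    static int match_stream_specifier(AVFormatContext *ic, AVStream *st, const char *spec)
    {
        enum AVMediaType type;
        if (!*spec)                         /* empty specifier matches everything */
            return 1;
        if (*spec >= '0' && *spec <= '9')   /* plain stream index */
            return st->index == strtol(spec, NULL, 10);

        switch (*spec) {                    /* media type, optionally ":index" */
        case 'v': type = AVMEDIA_TYPE_VIDEO;    break;
        case 'a': type = AVMEDIA_TYPE_AUDIO;    break;
        case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
        default:  return 0;
        }
        if (st->codec->codec_type != type)
            return 0;
        if (spec[1] == ':') {               /* n-th stream of that type */
            int want = strtol(spec + 2, NULL, 10), seen = 0, i;
            for (i = 0; i < ic->nb_streams; i++) {
                if (ic->streams[i]->codec->codec_type != type)
                    continue;
                if (ic->streams[i] == st)
                    return seen == want;
                seen++;
            }
            return 0;
        }
        return 1;
    }
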
doc/general.texi: Simple merge
diff --cc ffmpeg.c
+++ b/ffmpeg.c
@@@ -1725,6 -1664,6 +1721,14 @@@ static int output_packet(InputStream *i
                  int frame_size;
  
                  ost = ost_table[i];
++
++                /* finish if recording time exhausted */
++                if (recording_time != INT64_MAX &&
++                        av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000})
++                    >= 0) {
++                    ist->is_past_recording_time = 1;
++                    continue;
++                }
                  if (ost->source_index == ist_index) {
  #if CONFIG_AVFILTER
                  frame_available = ist->st->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
@@@ -2804,17 -2656,13 +2808,6 @@@ static int transcode(AVFormatContext **
              }
          }
  
--        /* finish if recording time exhausted */
--        if (recording_time != INT64_MAX &&
-             (pkt.pts != AV_NOPTS_VALUE ?
-                 av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000})
-                     :
-                 av_compare_ts(ist->pts, AV_TIME_BASE_Q, recording_time + start_time, (AVRational){1, 1000000})
-             )>= 0) {
 -            av_compare_ts(pkt.pts, ist->st->time_base, recording_time + start_time, (AVRational){1, 1000000}) >= 0) {
--            ist->is_past_recording_time = 1;
--            goto discard_packet;
--        }
--
          //fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
          if (output_packet(ist, ist_index, ost_table, nb_ostreams, &pkt) < 0) {
  
libavcodec/Makefile: Simple merge
libavcodec/allcodecs.c: Simple merge
libavcodec/h264_refs.c: Simple merge
libavcodec/x86/dsputil_yasm.asm: Simple merge
libavformat/Makefile: Simple merge
libavformat/allformats.c: Simple merge
libavformat/rmdec.c: Simple merge
diff --cc libavformat/version.h
+++ b/libavformat/version.h
@@@ -24,7 -24,7 +24,7 @@@
  #include "libavutil/avutil.h"
  
  #define LIBAVFORMAT_VERSION_MAJOR 53
- #define LIBAVFORMAT_VERSION_MINOR  7
 -#define LIBAVFORMAT_VERSION_MINOR  5
++#define LIBAVFORMAT_VERSION_MINOR  8
  #define LIBAVFORMAT_VERSION_MICRO  0
  
  #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
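
The minor version moves to 8 (the qatar/libav side was still at 5, ffmpeg at 7 before the merge), recording the new XMV demuxer on the libavformat timeline. Code that wants to know whether it is built against a tree containing this change can test the packed version macro, or better, probe for the demuxer at runtime; the "xmv" short name below is assumed from the "add XMV demuxer" commit:

    #include <libavformat/avformat.h>
    #include <libavformat/version.h>

    /* compile-time: did we build against a libavformat that includes this bump? */
    #if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 8, 0)
    #  define BUILT_WITH_XMV 1
    #else
    #  define BUILT_WITH_XMV 0
    #endif

    /* runtime: probing for the demuxer itself is more reliable than version math */
    static int have_xmv_demuxer(void)
    {
        av_register_all();
        return av_find_input_format("xmv") != NULL;
    }
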
libavutil/x86/x86util.asm: Simple merge
libswscale/ppc/swscale_altivec.c: Simple merge
diff --cc libswscale/swscale.c
+++ b/libswscale/swscale.c
@@@ -2208,11 -2086,8 +2208,11 @@@ static av_always_inline void hyscale(Sw
          src= formatConvBuffer;
      }
  
 -    if (!c->hyscale_fast) {
 +    if (c->hScale16) {
 +        int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;
 +        c->hScale16(dst, dstWidth, (const uint16_t*)src, srcW, xInc, hLumFilter, hLumFilterPos, hLumFilterSize, shift);
 +    } else if (!c->hyscale_fast) {
-         c->hScale(c, dst, dstWidth, src, hLumFilter, hLumFilterPos, hLumFilterSize);
+         c->hyScale(c, dst, dstWidth, src, hLumFilter, hLumFilterPos, hLumFilterSize);
      } else { // fast bilinear upscale / crap downscale
          c->hyscale_fast(c, dst, dstWidth, src, srcW, xInc);
      }
@@@ -2253,13 -2124,9 +2253,13 @@@ static av_always_inline void hcscale(Sw
          src2= buf2;
      }
  
 -    if (!c->hcscale_fast) {
 +    if (c->hScale16) {
 +        int shift= isAnyRGB(c->srcFormat) || c->srcFormat==PIX_FMT_PAL8 ? 13 : av_pix_fmt_descriptors[c->srcFormat].comp[0].depth_minus1;
 +        c->hScale16(dst1, dstWidth, (const uint16_t*)src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift);
 +        c->hScale16(dst2, dstWidth, (const uint16_t*)src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize, shift);
 +    } else if (!c->hcscale_fast) {
-         c->hScale(c, dst1, dstWidth, src1, hChrFilter, hChrFilterPos, hChrFilterSize);
-         c->hScale(c, dst2, dstWidth, src2, hChrFilter, hChrFilterPos, hChrFilterSize);
+         c->hcScale(c, dst1, dstWidth, src1, hChrFilter, hChrFilterPos, hChrFilterSize);
+         c->hcScale(c, dst2, dstWidth, src2, hChrFilter, hChrFilterPos, hChrFilterSize);
      } else { // fast bilinear upscale / crap downscale
          c->hcscale_fast(c, dst1, dst2, dstWidth, src1, src2, srcW, xInc);
      }
@@@ -2926,10 -2787,9 +2926,10 @@@ static av_cold void sws_init_swScale_c(
          }
      }
  
 +
      if (c->srcBpc == 8) {
          if (c->dstBpc <= 10) {
-             c->hScale       = hScale8To15_c;
+             c->hyScale = c->hcScale = hScale8To15_c;
              if (c->flags & SWS_FAST_BILINEAR) {
                  c->hyscale_fast = hyscale_fast_c;
                  c->hcscale_fast = hcscale_fast_c;
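
With hScale() split, luma and chroma each get their own pointer (hyScale/hcScale), both initialised to hScale8To15_c for 8-bit sources. What that scaler computes is a plain FIR filter over precomputed per-output-pixel taps; a self-contained reference of the 8-bit-to-15-bit case, simplified from hScale8To15_c (the unused SwsContext parameter is dropped):

    #include <stdint.h>

    /* dst[i] = sum of filterSize source pixels starting at filterPos[i], each
     * weighted by filter[i*filterSize + j] (Q14 coefficients); the >>7 brings
     * the 8+14 bit product down to swscale's 15-bit intermediate range. */
    static void hscale_8to15_ref(int16_t *dst, int dstW, const uint8_t *src,
                                 const int16_t *filter, const int16_t *filterPos,
                                 int filterSize)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            int srcPos = filterPos[i];
            int val    = 0;
            for (j = 0; j < filterSize; j++)
                val += (int)src[srcPos + j] * filter[filterSize * i + j];
            val >>= 7;
            dst[i] = val > 32767 ? 32767 : val;   /* clip filter overshoot */
        }
    }
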
diff --cc libswscale/swscale_internal.h
+++ b/libswscale/swscale_internal.h
@@@ -483,14 -481,15 +483,19 @@@ typedef struct SwsContext 
       *                   (and input coefficients thus padded with zeroes)
       *                   to simplify creating SIMD code.
       */
-     void (*hScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src,
-                    const int16_t *filter, const int16_t *filterPos,
-                    int filterSize);
+     /** @{ */
+     void (*hyScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src,
+                     const int16_t *filter, const int16_t *filterPos,
+                     int filterSize);
+     void (*hcScale)(struct SwsContext *c, int16_t *dst, int dstW, const uint8_t *src,
+                     const int16_t *filter, const int16_t *filterPos,
+                     int filterSize);
+     /** @} */
  
 +    void (*hScale16)(int16_t *dst, int dstW, const uint16_t *src, int srcW,
 +                   int xInc, const int16_t *filter, const int16_t *filterPos,
 +                   long filterSize, int shift);
 +
      void (*lumConvertRange)(int16_t *dst, int width); ///< Color range conversion function for luma plane if needed.
      void (*chrConvertRange)(int16_t *dst1, int16_t *dst2, int width); ///< Color range conversion function for chroma planes if needed.
  
libswscale/x86/swscale_template.c: Simple merge