@emph{any patch you make must be published}. The best way to proceed is
to send your patches to the FFmpeg mailing list.
+
@anchor{Coding Rules}
@section Coding Rules
@item We are busy and haven't had time yet to read your report or
investigate the issue.
@item You didn't follow @url{http://ffmpeg.org/bugreports.html}.
-@item You didn't use git HEAD.
+@item You didn't use git master.
@item You reported a segmentation fault without gdb output.
@item You describe a problem but not how to reproduce it.
@item It's unclear if you use ffmpeg as a command line tool or use
@section ffmpeg does not work; what is wrong?
-Try a @code{make distclean} in the ffmpeg source directory before the build. If this does not help see
+Try a @code{make distclean} in the ffmpeg source directory before the build.
+If this does not help, see
(@url{http://ffmpeg.org/bugreports.html}).
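A clean rebuild amounts to the following (configure options omitted here;
use your usual ones):
@example
make distclean
./configure
make
@end example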
@section How do I encode single pictures into movies?
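One possibility is the @code{image2} demuxer, which turns a sequence of
numbered images into a movie, for instance (filenames illustrative):
@example
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
@end example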
ffmpeg -i input.avs
@end example
-For ANY other help on Avisynth, please visit @url{http://www.avisynth.org/}.
+For ANY other help on Avisynth, please visit the
+@uref{http://www.avisynth.org/, Avisynth homepage}.
@section How can I join video files?
FFmpeg is already organized in a highly modular manner and does not need to
be rewritten in a formal object language. Further, many of the developers
favor straight C; it works for them. For more arguments on this matter,
-read "Programming Religion" at (@url{http://www.tux.org/lkml/#s15}).
+read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
@section Why are the ffmpeg programs devoid of debugging symbols?
@end example
Note that you must activate the right video source and channel before
-launching ffmpeg with any TV viewer such as xawtv
-(@url{http://linux.bytesex.org/xawtv/}) by Gerd Knorr. You also
+launching ffmpeg, using any TV viewer such as
+@uref{http://linux.bytesex.org/xawtv/, xawtv} by Gerd Knorr. You also
have to set the audio recording levels correctly with a
standard mixer.
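Once the source and channel are configured, a capture might look like
this (the device path is illustrative):
@example
ffmpeg -f video4linux2 -i /dev/video0 /tmp/out.mpg
@end example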
@code{make install}).
@item In order to compile FFplay, you must have the MinGW development library
-of SDL. Get it from @url{http://www.libsdl.org}.
+of @uref{http://www.libsdl.org/, SDL}.
Edit the @file{bin/sdl-config} script so that it points to the correct prefix
where SDL was installed. Verify that @file{sdl-config} can be launched from
the MSYS command line.
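A quick way to check that it is picked up correctly (the output depends
on your installation):
@example
sdl-config --version
sdl-config --cflags --libs
@end example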
(you can change the cross-prefix according to the prefix chosen for the
MinGW tools).
-Then you can easily test FFmpeg with Wine
-(@url{http://www.winehq.com/}).
+Then you can easily test FFmpeg with @uref{http://www.winehq.com/, Wine}.
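+For instance (binary name illustrative):
+@example
+wine ffmpeg.exe -version
+@end example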
@subsection Compilation under Cygwin
libogg-devel, libvorbis-devel
@end example
-These library packages are only available from Cygwin Ports
-(@url{http://sourceware.org/cygwinports/}) :
+These library packages are only available from
+@uref{http://sourceware.org/cygwinports/, Cygwin Ports}:
@example
yasm, libSDL-devel, libdirac-devel, libfaac-devel, libgsm-devel,
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
supporting it (currently Darwin Streaming Server and Mischa Spiegelmock's
-RTSP server, @url{http://github.com/revmischa/rtsp-server}).
+@uref{http://github.com/revmischa/rtsp-server, RTSP server}).
The required syntax for an RTSP URL is:
@example
#define MAX_STREAMS 1024 /* arbitrary sanity check value */
static const char *last_asked_format = NULL;
-static double *input_files_ts_scale[MAX_FILES] = {NULL};
-static int nb_input_files_ts_scale[MAX_FILES] = {0};
+static double *ts_scale;
+static int nb_ts_scale;
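+/* -itsscale values for the next input file, indexed by stream index;
+ * copied into each InputStream's ts_scale when the file is opened */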
static AVFormatContext *output_files[MAX_FILES];
static int nb_output_files = 0;
static int intra_only = 0;
static int audio_sample_rate = 0;
-static int64_t channel_layout = 0;
#define QSCALE_NONE -99999
static float audio_qscale = QSCALE_NONE;
static int audio_disable = 0;
static int64_t recording_time = INT64_MAX;
static int64_t start_time = 0;
-static int64_t recording_timestamp = 0;
static int64_t input_ts_offset = 0;
static int file_overwrite = 0;
static AVDictionary *metadata;
int64_t next_pts; /* synthetic pts for cases where pkt.pts
is not defined */
int64_t pts; /* current pts */
+ double ts_scale;
int is_start; /* is 1 at the start and after a discontinuity */
int showed_multi_packet_warning;
int is_past_recording_time;
}
for(i=0;i<nb_input_files;i++) {
av_close_input_file(input_files[i].ctx);
- av_free(input_files_ts_scale[i]);
}
av_free(intra_matrix);
}
}
-static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx)
+static OutputStream *new_output_stream(AVFormatContext *oc, int file_idx, AVCodec *codec)
{
- int idx = oc->nb_streams - 1;
OutputStream *ost;
+ AVStream *st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
+ int idx = oc->nb_streams - 1;
+
+ if (!st) {
+ av_log(NULL, AV_LOG_ERROR, "Could not alloc stream.\n");
+ ffmpeg_exit(1);
+ }
output_streams_for_file[file_idx] =
grow_array(output_streams_for_file[file_idx],
}
ost->file_index = file_idx;
ost->index = idx;
+ ost->st = st;
+ ost->enc = codec;
+
+ avcodec_get_context_defaults3(st->codec, codec);
ost->sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
return ost;
if (err < 0)
return err;
/* copy stream format */
- s->nb_streams = 0;
- s->streams = av_mallocz(sizeof(AVStream *) * ic->nb_streams);
for(i=0;i<ic->nb_streams;i++) {
AVStream *st;
+ OutputStream *ost;
AVCodec *codec;
- s->nb_streams++;
+ codec = avcodec_find_encoder(ic->streams[i]->codec->codec_id);
+ ost = new_output_stream(s, nb_output_files, codec);
+ st = ost->st;
// FIXME: a more elegant solution is needed
- st = av_mallocz(sizeof(AVStream));
memcpy(st, ic->streams[i], sizeof(AVStream));
st->info = av_malloc(sizeof(*st->info));
memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
- st->codec = avcodec_alloc_context();
- if (!st->codec) {
- print_error(filename, AVERROR(ENOMEM));
- ffmpeg_exit(1);
- }
avcodec_copy_context(st->codec, ic->streams[i]->codec);
- s->streams[i] = st;
- codec = avcodec_find_encoder(st->codec->codec_id);
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
if (audio_stream_copy) {
st->stream_copy = 1;
if(st->codec->flags & CODEC_FLAG_BITEXACT)
nopts = 1;
-
- new_output_stream(s, nb_output_files);
}
- if (!nopts)
- s->timestamp = av_gettime();
-
av_close_input_file(ic);
return 0;
}
for(i=0;i<os->nb_streams;i++,n++) {
int found;
ost = ost_table[n] = output_streams_for_file[k][i];
- ost->st = os->streams[i];
if (nb_stream_maps > 0) {
ost->source_index = input_files[stream_maps[n].file_index].ist_index +
stream_maps[n].stream_index;
}
choose_sample_rate(ost->st, ost->enc);
codec->time_base = (AVRational){1, codec->sample_rate};
+ if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
+ codec->sample_fmt = icodec->sample_fmt;
+ choose_sample_fmt(ost->st, ost->enc);
if (!codec->channels) {
codec->channels = icodec->channels;
codec->channel_layout = icodec->channel_layout;
if (pkt.pts != AV_NOPTS_VALUE)
pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
- if (pkt.stream_index < nb_input_files_ts_scale[file_index]
- && input_files_ts_scale[file_index][pkt.stream_index]){
+ if (ist->ts_scale) {
if(pkt.pts != AV_NOPTS_VALUE)
- pkt.pts *= input_files_ts_scale[file_index][pkt.stream_index];
+ pkt.pts *= ist->ts_scale;
if(pkt.dts != AV_NOPTS_VALUE)
- pkt.dts *= input_files_ts_scale[file_index][pkt.stream_index];
+ pkt.dts *= ist->ts_scale;
}
// fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n", ist->next_pts, pkt.dts, input_files[ist->file_index].ts_offset, ist->st->codec->codec_type);
if(stream >= MAX_STREAMS)
ffmpeg_exit(1);
- input_files_ts_scale[nb_input_files] = grow_array(input_files_ts_scale[nb_input_files], sizeof(*input_files_ts_scale[nb_input_files]), &nb_input_files_ts_scale[nb_input_files], stream + 1);
- input_files_ts_scale[nb_input_files][stream]= scale;
+ ts_scale = grow_array(ts_scale, sizeof(*ts_scale), &nb_ts_scale, stream + 1);
+ ts_scale[stream] = scale;
return 0;
}
static int opt_recording_timestamp(const char *opt, const char *arg)
{
- recording_timestamp = parse_time_or_die(opt, arg, 0) / 1000000;
+ char buf[128];
+ int64_t recording_timestamp = parse_time_or_die(opt, arg, 0) / 1E6;
+ struct tm time = *gmtime((time_t*)&recording_timestamp);
+ strftime(buf, sizeof(buf), "creation_time=%FT%T%z", &time);
+ opt_metadata("metadata", buf);
+
+ av_log(NULL, AV_LOG_WARNING, "%s is deprecated, set the 'creation_time' metadata "
+ "tag instead.\n", opt);
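+    /* e.g. "-timestamp 2010-12-24T12:00:00" is now rewritten into the metadata
+     * tag "creation_time=2010-12-24T12:00:00+0000" (example value only) */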
return 0;
}
ist->file_index = nb_input_files;
ist->discard = 1;
+ if (i < nb_ts_scale)
+ ist->ts_scale = ts_scale[i];
+
switch (dec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
ist->dec = avcodec_find_decoder_by_name(audio_codec_name);
if(!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
set_context_opts(dec, avcodec_opts[AVMEDIA_TYPE_AUDIO], AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_DECODING_PARAM, ist->dec);
- channel_layout = dec->channel_layout;
- audio_sample_fmt = dec->sample_fmt;
if(audio_disable)
st->discard= AVDISCARD_ALL;
break;
frame_width = 0;
audio_sample_rate = 0;
audio_channels = 0;
+ audio_sample_fmt = AV_SAMPLE_FMT_NONE;
+ av_freep(&ts_scale);
+ nb_ts_scale = 0;
av_freep(&video_codec_name);
av_freep(&audio_codec_name);
enum CodecID codec_id = CODEC_ID_NONE;
AVCodec *codec= NULL;
- st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
- if (!st) {
- fprintf(stderr, "Could not alloc stream\n");
- ffmpeg_exit(1);
- }
- ost = new_output_stream(oc, file_idx);
-
if(!video_stream_copy){
if (video_codec_name) {
codec_id = find_codec_or_die(video_codec_name, AVMEDIA_TYPE_VIDEO, 1,
avcodec_opts[AVMEDIA_TYPE_VIDEO]->strict_std_compliance);
codec = avcodec_find_encoder_by_name(video_codec_name);
- ost->enc = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
codec = avcodec_find_encoder(codec_id);
}
+ }
+
+ ost = new_output_stream(oc, file_idx, codec);
+ st = ost->st;
+ if (!video_stream_copy) {
ost->frame_aspect_ratio = frame_aspect_ratio;
frame_aspect_ratio = 0;
#if CONFIG_AVFILTER
#endif
}
- avcodec_get_context_defaults3(st->codec, codec);
ost->bitstream_filters = video_bitstream_filters;
video_bitstream_filters= NULL;
AVCodecContext *audio_enc;
enum CodecID codec_id = CODEC_ID_NONE;
- st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
- if (!st) {
- fprintf(stderr, "Could not alloc stream\n");
- ffmpeg_exit(1);
- }
- ost = new_output_stream(oc, file_idx);
-
if(!audio_stream_copy){
if (audio_codec_name) {
codec_id = find_codec_or_die(audio_codec_name, AVMEDIA_TYPE_AUDIO, 1,
avcodec_opts[AVMEDIA_TYPE_AUDIO]->strict_std_compliance);
codec = avcodec_find_encoder_by_name(audio_codec_name);
- ost->enc = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_AUDIO);
codec = avcodec_find_encoder(codec_id);
}
}
-
- avcodec_get_context_defaults3(st->codec, codec);
+ ost = new_output_stream(oc, file_idx, codec);
+ st = ost->st;
ost->bitstream_filters = audio_bitstream_filters;
audio_bitstream_filters= NULL;
}
if (audio_channels)
audio_enc->channels = audio_channels;
- audio_enc->sample_fmt = audio_sample_fmt;
+ if (audio_sample_fmt != AV_SAMPLE_FMT_NONE)
+ audio_enc->sample_fmt = audio_sample_fmt;
if (audio_sample_rate)
audio_enc->sample_rate = audio_sample_rate;
- audio_enc->channel_layout = channel_layout;
- choose_sample_fmt(st, codec);
}
if (audio_language) {
av_dict_set(&st->metadata, "language", audio_language, 0);
static void new_data_stream(AVFormatContext *oc, int file_idx)
{
AVStream *st;
- AVCodec *codec=NULL;
+ OutputStream *ost;
AVCodecContext *data_enc;
- st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
- if (!st) {
- fprintf(stderr, "Could not alloc stream\n");
- ffmpeg_exit(1);
- }
- new_output_stream(oc, file_idx);
+ ost = new_output_stream(oc, file_idx, NULL);
+ st = ost->st;
data_enc = st->codec;
if (!data_stream_copy) {
fprintf(stderr, "Data stream encoding not supported yet (only streamcopy)\n");
ffmpeg_exit(1);
}
- avcodec_get_context_defaults3(st->codec, codec);
data_enc->codec_type = AVMEDIA_TYPE_DATA;
AVCodecContext *subtitle_enc;
enum CodecID codec_id = CODEC_ID_NONE;
- st = av_new_stream(oc, oc->nb_streams < nb_streamid_map ? streamid_map[oc->nb_streams] : 0);
- if (!st) {
- fprintf(stderr, "Could not alloc stream\n");
- ffmpeg_exit(1);
- }
- ost = new_output_stream(oc, file_idx);
- subtitle_enc = st->codec;
if(!subtitle_stream_copy){
if (subtitle_codec_name) {
codec_id = find_codec_or_die(subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 1,
avcodec_opts[AVMEDIA_TYPE_SUBTITLE]->strict_std_compliance);
codec = avcodec_find_encoder_by_name(subtitle_codec_name);
- ost->enc = codec;
} else {
codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_SUBTITLE);
codec = avcodec_find_encoder(codec_id);
}
}
- avcodec_get_context_defaults3(st->codec, codec);
+ ost = new_output_stream(oc, file_idx, codec);
+ st = ost->st;
+ subtitle_enc = st->codec;
ost->bitstream_filters = subtitle_bitstream_filters;
subtitle_bitstream_filters= NULL;
if (use_subtitle) new_subtitle_stream(oc, nb_output_files);
if (use_data) new_data_stream(oc, nb_output_files);
- oc->timestamp = recording_timestamp;
-
av_dict_copy(&oc->metadata, metadata, 0);
av_dict_free(&metadata);
}
frame_height = 0;
audio_sample_rate = 0;
audio_channels = 0;
+ audio_sample_fmt = AV_SAMPLE_FMT_NONE;
av_freep(&forced_key_frames);
uninit_opts();
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
- int len1 av_unused, got_picture, i;
+ int got_picture, i;
if (packet_queue_get(&is->videoq, pkt, 1) < 0)
return -1;
return 0;
}
- len1 = avcodec_decode_video2(is->video_st->codec,
- frame, &got_picture,
- pkt);
+ avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
if (got_picture) {
if (decoder_reorder_pts == -1) {
VideoState *is = arg;
SubPicture *sp;
AVPacket pkt1, *pkt = &pkt1;
- int len1 av_unused, got_subtitle;
+ int got_subtitle;
double pts;
int i, j;
int r, g, b, y, u, v, a;
if (pkt->pts != AV_NOPTS_VALUE)
pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
- len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
- &sp->sub, &got_subtitle,
- pkt);
+ avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
+ &got_subtitle, pkt);
+
if (got_subtitle && sp->sub.format == 0) {
sp->pts = pts;
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
+#include "mpegvideo_common.h"
#include "dnxhdenc.h"
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
};
static const AVClass class = { "dnxhd", av_default_item_name, options, LIBAVUTIL_VERSION_INT };
-int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
-
#define LAMBDA_FRAC_BITS 10
static av_always_inline void dnxhd_get_pixels_8x4(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
return h->pps.chroma_qp_table[t][qscale];
}
-static av_always_inline void pred_pskip_motion(H264Context * const h);
-
static void fill_decode_neighbors(H264Context *h, int mb_type){
MpegEncContext * const s = &h->s;
const int mb_xy= h->mb_xy;
return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
}
-/**
- * decodes a P_SKIP or B_SKIP macroblock
- */
-static void av_unused decode_mb_skip(H264Context *h){
- MpegEncContext * const s = &h->s;
- const int mb_xy= h->mb_xy;
- int mb_type=0;
-
- memset(h->non_zero_count[mb_xy], 0, 48);
-
- if(MB_FIELD)
- mb_type|= MB_TYPE_INTERLACED;
-
- if( h->slice_type_nos == AV_PICTURE_TYPE_B )
- {
- // just for fill_caches. pred_direct_motion will set the real mb_type
- mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
- if(h->direct_spatial_mv_pred){
- fill_decode_neighbors(h, mb_type);
- fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
- }
- ff_h264_pred_direct_motion(h, &mb_type);
- mb_type|= MB_TYPE_SKIP;
- }
- else
- {
- mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
-
- fill_decode_neighbors(h, mb_type);
- pred_pskip_motion(h);
- }
-
- write_back_motion(h, mb_type);
- s->current_picture.f.mb_type[mb_xy] = mb_type;
- s->current_picture.f.qscale_table[mb_xy] = s->qscale;
- h->slice_table[ mb_xy ]= h->slice_num;
- h->prev_mb_skipped= 1;
-}
-
-#include "h264_mvpred.h" //For pred_pskip_motion()
-
#endif /* AVCODEC_H264_H */
return;
}
+/**
+ * decodes a P_SKIP or B_SKIP macroblock
+ */
+static void av_unused decode_mb_skip(H264Context *h){
+ MpegEncContext * const s = &h->s;
+ const int mb_xy= h->mb_xy;
+ int mb_type=0;
+
+ memset(h->non_zero_count[mb_xy], 0, 48);
+
+ if(MB_FIELD)
+ mb_type|= MB_TYPE_INTERLACED;
+
+ if( h->slice_type_nos == AV_PICTURE_TYPE_B )
+ {
+ // just for fill_caches. pred_direct_motion will set the real mb_type
+ mb_type|= MB_TYPE_L0L1|MB_TYPE_DIRECT2|MB_TYPE_SKIP;
+ if(h->direct_spatial_mv_pred){
+ fill_decode_neighbors(h, mb_type);
+ fill_decode_caches(h, mb_type); //FIXME check what is needed and what not ...
+ }
+ ff_h264_pred_direct_motion(h, &mb_type);
+ mb_type|= MB_TYPE_SKIP;
+ }
+ else
+ {
+ mb_type|= MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P1L0|MB_TYPE_SKIP;
+
+ fill_decode_neighbors(h, mb_type);
+ pred_pskip_motion(h);
+ }
+
+ write_back_motion(h, mb_type);
+ s->current_picture.f.mb_type[mb_xy] = mb_type;
+ s->current_picture.f.qscale_table[mb_xy] = s->qscale;
+ h->slice_table[ mb_xy ]= h->slice_num;
+ h->prev_mb_skipped= 1;
+}
+
#endif /* AVCODEC_H264_MVPRED_H */
AVStream **streams;
char filename[1024]; /**< input or output filename */
/* stream info */
- int64_t timestamp;
+#if FF_API_TIMESTAMP
+ /**
+ * @deprecated use 'creation_time' metadata tag instead
+ */
+ attribute_deprecated int64_t timestamp;
+#endif
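+    /* the creation time is now passed through metadata instead, e.g.
+     * (value illustrative):
+     * av_dict_set(&s->metadata, "creation_time", "2010-12-24T12:00:00", 0); */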
int ctx_flags; /**< Format-specific flags, see AVFMTCTX_xx */
/* private data for pts handling (do not modify directly). */
AVStream *ast[2]; /* stereo audio streams */
AVFifoBuffer *audio_data[2]; /* FIFO for storing excessive amounts of PCM */
int frames; /* current frame number */
- time_t start_time; /* recording start time */
+ int64_t start_time; /* recording start time */
    int has_audio; /* frame under construction has audio */
    int has_video; /* frame under construction has video */
    uint8_t frame_buf[DV_MAX_FRAME_SIZE]; /* frame under construction */
{
DVMuxContext *c = s->priv_data;
AVStream *vst = NULL;
+ AVDictionaryEntry *t;
int i;
/* we support at most 1 video and 2 audio streams */
c->frames = 0;
c->has_audio = 0;
c->has_video = 0;
- c->start_time = (time_t)s->timestamp;
+#if FF_API_TIMESTAMP
+ if (s->timestamp)
+ c->start_time = s->timestamp;
+ else
+#endif
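+    /* otherwise take the start time from the creation_time metadata tag; note
+     * that whitespace in a strptime() format matches any amount of whitespace
+     * (including none) in the input, so the spaced format below also parses
+     * the compact "2010-12-24T12:00:00" form */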
+ if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) {
+ struct tm time = {0};
+ strptime(t->value, "%Y - %m - %dT%T", &time);
+ c->start_time = mktime(&time);
+ }
for (i=0; i < c->n_ast; i++) {
if (c->ast[i] && !(c->audio_data[i]=av_fifo_alloc(100*AVCODEC_MAX_AUDIO_FRAME_SIZE))) {
GXFContext *gxf = s->priv_data;
AVIOContext *pb = s->pb;
int timecode_base = gxf->time_base.den == 60000 ? 60 : 50;
+ int64_t timestamp = 0;
+ AVDictionaryEntry *t;
+
+#if FF_API_TIMESTAMP
+ if (s->timestamp)
+ timestamp = s->timestamp;
+ else
+#endif
+ if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) {
+ struct tm time = {0};
+ strptime(t->value, "%Y - %m - %dT%T", &time);
+ timestamp = mktime(&time);
+ }
+
// XXX drop frame
uint32_t timecode =
avio_wl32(pb, gxf->nb_fields); /* mark out */
avio_wl32(pb, 0); /* timecode mark in */
avio_wl32(pb, timecode); /* timecode mark out */
- avio_wl64(pb, s->timestamp); /* modification time */
- avio_wl64(pb, s->timestamp); /* creation time */
+ avio_wl64(pb, timestamp); /* modification time */
+ avio_wl64(pb, timestamp); /* creation time */
avio_wl16(pb, 0); /* reserved */
avio_wl16(pb, 0); /* reserved */
avio_wl16(pb, gxf->audio_tracks);
mkv_write_trailer,
.flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
.codec_tag = (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0},
- .subtitle_codec = CODEC_ID_TEXT,
+ .subtitle_codec = CODEC_ID_SSA,
};
#endif
{
AVIOContext *pb = s->pb;
MOVMuxContext *mov = s->priv_data;
+ AVDictionaryEntry *t;
int i, hint_track = 0;
if (!s->pb->seekable) {
}
mov_write_mdat_tag(pb, mov);
- mov->time = s->timestamp + 0x7C25B080; //1970 based -> 1904 based
+
+#if FF_API_TIMESTAMP
+ if (s->timestamp)
+ mov->time = s->timestamp;
+ else
+#endif
+ if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) {
+ struct tm time = {0};
+ strptime(t->value, "%Y - %m - %dT%T", &time);
+ mov->time = mktime(&time);
+ }
+ mov->time += 0x7C25B080; //1970 based -> 1904 based
if (mov->chapter_track)
mov_create_chapter_track(s, mov->chapter_track);
int i;
uint8_t present[FF_ARRAY_ELEMS(mxf_essence_container_uls)] = {0};
const int *samples_per_frame = NULL;
+ AVDictionaryEntry *t;
+ int64_t timestamp = 0;
if (!s->nb_streams)
return -1;
sc->order = AV_RB32(sc->track_essence_element_key+12);
}
+#if FF_API_TIMESTAMP
if (s->timestamp)
- mxf->timestamp = mxf_parse_timestamp(s->timestamp);
+ timestamp = s->timestamp;
+ else
+#endif
+ if (t = av_dict_get(s->metadata, "creation_time", NULL, 0)) {
+ struct tm time = {0};
+ strptime(t->value, "%Y - %m - %dT%T", &time);
+ timestamp = mktime(&time);
+ }
+ if (timestamp)
+ mxf->timestamp = mxf_parse_timestamp(timestamp);
mxf->duration = -1;
mxf->timecode_track = av_mallocz(sizeof(*mxf->timecode_track));
int64_t *pos_arg, int64_t pos_limit)
{
struct ogg *ogg = s->priv_data;
- struct ogg_stream *os = ogg->streams + stream_index;
AVIOContext *bc = s->pb;
int64_t pts = AV_NOPTS_VALUE;
- int i;
+ int i = -1;
avio_seek(bc, *pos_arg, SEEK_SET);
ogg_reset(ogg);
while (avio_tell(bc) < pos_limit && !ogg_packet(s, &i, NULL, NULL, pos_arg)) {
if (i == stream_index) {
+ struct ogg_stream *os = ogg->streams + stream_index;
pts = ogg_calc_pts(s, i, NULL);
if (os->keyframe_seek && !(os->pflags & AV_PKT_FLAG_KEY))
pts = AV_NOPTS_VALUE;
os->keyframe_seek = 1;
ret = av_seek_frame_binary(s, stream_index, timestamp, flags);
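+    /* ogg->streams may have been reallocated during the seek, so the
+     * stream pointer must be looked up again before using it */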
+ os = ogg->streams + stream_index;
if (ret < 0)
os->keyframe_seek = 0;
return ret;
#ifndef FF_API_LOOP_OUTPUT
#define FF_API_LOOP_OUTPUT (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
+#ifndef FF_API_TIMESTAMP
+#define FF_API_TIMESTAMP (LIBAVFORMAT_VERSION_MAJOR < 54)
+#endif
#endif /* AVFORMAT_VERSION_H */
.log2_chroma_w= 0,
.log2_chroma_h= 0,
.comp = {
- {0,1,1,0,9}, /* Y */
- {1,1,1,0,9}, /* U */
- {2,1,1,0,9}, /* V */
+ {0,1,1,0,8}, /* Y */
+ {1,1,1,0,8}, /* U */
+ {2,1,1,0,8}, /* V */
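+            /* the last field is depth_minus1, so 8 means 9 bits per component */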
},
.flags = PIX_FMT_BE,
},