tools/sofa2wavs: fix build on Windows
[ffmpeg.git] / libavformat / mux.c
1 /*
2 * muxing functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include "avformat.h"
23 #include "avformat_internal.h"
24 #include "internal.h"
25 #include "mux.h"
26 #include "version.h"
27 #include "libavcodec/bsf.h"
28 #include "libavcodec/codec_desc.h"
29 #include "libavcodec/internal.h"
30 #include "libavcodec/packet_internal.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/dict.h"
34 #include "libavutil/timestamp.h"
35 #include "libavutil/avassert.h"
36 #include "libavutil/frame.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/mathematics.h"
39
40 /**
41 * @file
42 * muxing functions for use within libavformat
43 */
44
45 /* fraction handling */
46
47 /**
48 * f = val + (num / den) + 0.5.
49 *
50 * 'num' is normalized so that it is such as 0 <= num < den.
51 *
52 * @param f fractional number
53 * @param val integer value
54 * @param num must be >= 0
55 * @param den must be >= 1
56 */
57 static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
58 {
59 num += (den >> 1);
60 if (num >= den) {
61 val += num / den;
62 num = num % den;
63 }
64 f->val = val;
65 f->num = num;
66 f->den = den;
67 }
68
69 /**
70 * Fractional addition to f: f = f + (incr / f->den).
71 *
72 * @param f fractional number
73 * @param incr increment, can be positive or negative
74 */
75 static void frac_add(FFFrac *f, int64_t incr)
76 {
77 int64_t num, den;
78
79 num = f->num + incr;
80 den = f->den;
81 if (num < 0) {
82 f->val += num / den;
83 num = num % den;
84 if (num < 0) {
85 num += den;
86 f->val--;
87 }
88 } else if (num >= den) {
89 f->val += num / den;
90 num = num % den;
91 }
92 f->num = num;
93 }
94
95 int avformat_alloc_output_context2(AVFormatContext **avctx, const AVOutputFormat *oformat,
96 const char *format, const char *filename)
97 {
98 AVFormatContext *s = avformat_alloc_context();
99 int ret = 0;
100
101 *avctx = NULL;
102 if (!s)
103 goto nomem;
104
105 if (!oformat) {
106 if (format) {
107 oformat = av_guess_format(format, NULL, NULL);
108 if (!oformat) {
109 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not known.\n", format);
110 ret = AVERROR(EINVAL);
111 goto error;
112 }
113 } else {
114 oformat = av_guess_format(NULL, filename, NULL);
115 if (!oformat) {
116 ret = AVERROR(EINVAL);
117 av_log(s, AV_LOG_ERROR,
118 "Unable to choose an output format for '%s'; "
119 "use a standard extension for the filename or specify "
120 "the format manually.\n", filename);
121 goto error;
122 }
123 }
124 }
125
126 s->oformat = oformat;
127 if (ffofmt(s->oformat)->priv_data_size > 0) {
128 s->priv_data = av_mallocz(ffofmt(s->oformat)->priv_data_size);
129 if (!s->priv_data)
130 goto nomem;
131 if (s->oformat->priv_class) {
132 *(const AVClass**)s->priv_data= s->oformat->priv_class;
133 av_opt_set_defaults(s->priv_data);
134 }
135 } else
136 s->priv_data = NULL;
137
138 if (filename) {
139 if (!(s->url = av_strdup(filename)))
140 goto nomem;
141
142 }
143 *avctx = s;
144 return 0;
145 nomem:
146 av_log(s, AV_LOG_ERROR, "Out of memory\n");
147 ret = AVERROR(ENOMEM);
148 error:
149 avformat_free_context(s);
150 return ret;
151 }
152
153 static int validate_codec_tag(const AVFormatContext *s, const AVStream *st)
154 {
155 const AVCodecTag *avctag;
156 enum AVCodecID id = AV_CODEC_ID_NONE;
157 unsigned uppercase_tag = ff_toupper4(st->codecpar->codec_tag);
158 int64_t tag = -1;
159
160 /**
161 * Check that tag + id is in the table
162 * If neither is in the table -> OK
163 * If tag is in the table with another id -> FAIL
164 * If id is in the table with another tag -> FAIL unless strict < normal
165 */
166 for (int n = 0; s->oformat->codec_tag[n]; n++) {
167 avctag = s->oformat->codec_tag[n];
168 while (avctag->id != AV_CODEC_ID_NONE) {
169 if (ff_toupper4(avctag->tag) == uppercase_tag) {
170 id = avctag->id;
171 if (id == st->codecpar->codec_id)
172 return 1;
173 }
174 if (avctag->id == st->codecpar->codec_id)
175 tag = avctag->tag;
176 avctag++;
177 }
178 }
179 if (id != AV_CODEC_ID_NONE)
180 return 0;
181 if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
182 return 0;
183 return 1;
184 }
185
186
/**
 * Apply options and validate all streams before muxing starts.
 *
 * Performs per-stream sanity checks (timebase fallback, sample rate,
 * dimensions, aspect ratio, codec tags), enforces the muxer's
 * default-codec / stream-count restrictions, selects the interleaving
 * callback and calls the muxer's init() callback if present.
 *
 * @param s       muxing context; s->oformat must be set
 * @param options on input: muxer options; on output: options that were
 *                not consumed
 * @return init()'s return value (>= 0) on success, a negative AVERROR
 *         code on failure
 */
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    AVDictionary *tmp = NULL;
    const FFOutputFormat *of = ffofmt(s->oformat);
    AVDictionaryEntry *e;
    /* Byte offsets of the default-codec fields inside AVOutputFormat,
     * indexed by media type; 0 means "no default for this type". */
    static const unsigned default_codec_offsets[] = {
        [AVMEDIA_TYPE_VIDEO]    = offsetof(AVOutputFormat, video_codec),
        [AVMEDIA_TYPE_AUDIO]    = offsetof(AVOutputFormat, audio_codec),
        [AVMEDIA_TYPE_SUBTITLE] = offsetof(AVOutputFormat, subtitle_codec),
    };
    /* Number of streams seen so far per media type. */
    unsigned nb_type[FF_ARRAY_ELEMS(default_codec_offsets)] = { 0 };
    int ret = 0;

    /* Work on a copy so the caller's dictionary is only replaced on return. */
    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;
    /* Only forward options to priv_data if it was set up with the muxer's
     * own AVClass (i.e. by avformat_alloc_output_context2()). */
    if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
        (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

    /* Guarantee a non-NULL url for code downstream. */
    if (!s->url && !(s->url = av_strdup(""))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    // some sanity checks
    if (s->nb_streams == 0 && !(of->p.flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (unsigned i = 0; i < s->nb_streams; i++) {
        AVStream *const st = s->streams[i];
        FFStream *const sti = ffstream(st);
        AVCodecParameters *const par = st->codecpar;
        const AVCodecDescriptor *desc;

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
                avpriv_set_pts_info(st, 64, 1, par->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (par->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }

            /* Derive block_align for fixed-size sample formats. */
            if (!par->block_align)
                par->block_align = par->ch_layout.nb_channels *
                                   av_get_bits_per_sample(par->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((par->width <= 0 || par->height <= 0) &&
                !(of->p.flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            /* Only error out when both SARs are fully specified and differ
             * by more than ~0.4%, i.e. beyond rounding tolerance. */
            if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
                && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
            ) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    par->sample_aspect_ratio.num != 0 &&
                    par->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                           "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           par->sample_aspect_ratio.num,
                           par->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }
        /* Enforce "only one stream of each type" / "only default codecs"
         * restrictions for muxers that declare them. */
        if (of->flags_internal & (FF_OFMT_FLAG_MAX_ONE_OF_EACH | FF_OFMT_FLAG_ONLY_DEFAULT_CODECS)) {
            enum AVCodecID default_codec_id = AV_CODEC_ID_NONE;
            unsigned nb;
            if ((unsigned)par->codec_type < FF_ARRAY_ELEMS(default_codec_offsets)) {
                nb = ++nb_type[par->codec_type];
                if (default_codec_offsets[par->codec_type])
                    default_codec_id = *(const enum AVCodecID*)((const char*)of + default_codec_offsets[par->codec_type]);
            }
            if (of->flags_internal & FF_OFMT_FLAG_ONLY_DEFAULT_CODECS &&
                default_codec_id != AV_CODEC_ID_NONE && par->codec_id != default_codec_id) {
                av_log(s, AV_LOG_ERROR, "%s muxer supports only codec %s for type %s\n",
                       of->p.name, avcodec_get_name(default_codec_id), av_get_media_type_string(par->codec_type));
                ret = AVERROR(EINVAL);
                goto fail;
            } else if (default_codec_id == AV_CODEC_ID_NONE ||
                       (of->flags_internal & FF_OFMT_FLAG_MAX_ONE_OF_EACH && nb > 1)) {
                /* NOTE: when codec_type is out of table range, default_codec_id
                 * stays NONE and short-circuits before nb is read. */
                const char *type = av_get_media_type_string(par->codec_type);
                av_log(s, AV_LOG_ERROR, "%s muxer does not support %s stream of type %s\n",
                       of->p.name, default_codec_id == AV_CODEC_ID_NONE ? "any" : "more than one",
                       type ? type : "unknown");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }

        desc = avcodec_descriptor_get(par->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            sti->reorder = 1;

        sti->is_intra_only = ff_is_intra_only(par->codec_id);

        if (of->p.codec_tag) {
            if (   par->codec_tag
                && par->codec_id == AV_CODEC_ID_RAWVIDEO
                && (   av_codec_get_tag(of->p.codec_tag, par->codec_id) == 0
                    || av_codec_get_tag(of->p.codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
                && !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi/mov, we override it here
                par->codec_tag = 0;
            }
            if (par->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s incompatible with output codec id '%d' (%s)\n",
                           av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                /* No tag provided: look up the muxer's canonical one. */
                par->codec_tag = av_codec_get_tag(of->p.codec_tag, par->codec_id);
        }

        /* Attachments and SMPTE 2038 data do not take part in interleaving. */
        if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
            par->codec_id != AV_CODEC_ID_SMPTE_2038)
            fci->nb_interleaved_streams++;
    }
    /* Pick the interleaving strategy: muxer override, else per-DTS when
     * more than one stream needs interleaving, else passthrough. */
    fci->interleave_packet = of->interleave_packet;
    if (!fci->interleave_packet)
        fci->interleave_packet = fci->nb_interleaved_streams > 1 ?
                                     ff_interleave_packet_per_dts :
                                     ff_interleave_packet_passthrough;

    /* priv_data may still be unset when the context was not created via
     * avformat_alloc_output_context2(). */
    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->p.priv_class) {
            *(const AVClass **)s->priv_data = of->p.priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    } else {
        av_dict_set(&s->metadata, "encoder", NULL, 0);
    }

    /* Drop all per-stream "encoder-..." metadata entries. */
    for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
        av_dict_set(&s->metadata, e->key, NULL, 0);
    }

    /* Hand the unconsumed options back to the caller. */
    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    if (of->init) {
        if ((ret = of->init(s)) < 0) {
            if (of->deinit)
                of->deinit(s);
            return ret;
        }
        /* Normalize init()'s positive return values to 1. */
        return ret == 0;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
382
383 static int init_pts(AVFormatContext *s)
384 {
385 FFFormatContext *const si = ffformatcontext(s);
386
387 /* init PTS generation */
388 for (unsigned i = 0; i < s->nb_streams; i++) {
389 AVStream *const st = s->streams[i];
390 FFStream *const sti = ffstream(st);
391 int64_t den = AV_NOPTS_VALUE;
392
393 switch (st->codecpar->codec_type) {
394 case AVMEDIA_TYPE_AUDIO:
395 den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
396 break;
397 case AVMEDIA_TYPE_VIDEO:
398 den = (int64_t)st->time_base.num * st->time_base.den;
399 break;
400 default:
401 break;
402 }
403
404 if (den != AV_NOPTS_VALUE) {
405 if (den <= 0)
406 return AVERROR_INVALIDDATA;
407
408 frac_init(&sti->priv_pts, 0, 0, den);
409 }
410 }
411
412 si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_UNKNOWN;
413 if (s->avoid_negative_ts < 0) {
414 av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
415 if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
416 s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_DISABLED;
417 si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_DISABLED;
418 } else
419 s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
420 } else if (s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_DISABLED)
421 si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_DISABLED;
422
423 return 0;
424 }
425
426 static void flush_if_needed(AVFormatContext *s)
427 {
428 if (s->pb && s->pb->error >= 0) {
429 if (s->flush_packets == 1 || s->flags & AVFMT_FLAG_FLUSH_PACKETS)
430 avio_flush(s->pb);
431 else if (s->flush_packets && !(s->oformat->flags & AVFMT_NOFILE))
432 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT);
433 }
434 }
435
436 static void deinit_muxer(AVFormatContext *s)
437 {
438 FormatContextInternal *const fci = ff_fc_internal(s);
439 const FFOutputFormat *const of = ffofmt(s->oformat);
440 if (of && of->deinit && fci->initialized)
441 of->deinit(s);
442 fci->initialized =
443 fci->streams_initialized = 0;
444 }
445
446 int avformat_init_output(AVFormatContext *s, AVDictionary **options)
447 {
448 FormatContextInternal *const fci = ff_fc_internal(s);
449 int ret = 0;
450
451 if ((ret = init_muxer(s, options)) < 0)
452 return ret;
453
454 fci->initialized = 1;
455 fci->streams_initialized = ret;
456
457 if (ffofmt(s->oformat)->init && ret) {
458 if ((ret = init_pts(s)) < 0)
459 return ret;
460
461 return AVSTREAM_INIT_IN_INIT_OUTPUT;
462 }
463
464 return AVSTREAM_INIT_IN_WRITE_HEADER;
465 }
466
467 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
468 {
469 FormatContextInternal *const fci = ff_fc_internal(s);
470 int already_initialized = fci->initialized;
471 int streams_already_initialized = fci->streams_initialized;
472 int ret = 0;
473
474 if (!already_initialized)
475 if ((ret = avformat_init_output(s, options)) < 0)
476 return ret;
477
478 if (ffofmt(s->oformat)->write_header) {
479 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
480 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
481 ret = ffofmt(s->oformat)->write_header(s);
482 if (ret >= 0 && s->pb && s->pb->error < 0)
483 ret = s->pb->error;
484 if (ret < 0)
485 goto fail;
486 flush_if_needed(s);
487 }
488 if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
489 avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);
490
491 if (!fci->streams_initialized) {
492 if ((ret = init_pts(s)) < 0)
493 goto fail;
494 }
495
496 return streams_already_initialized;
497
498 fail:
499 deinit_muxer(s);
500 return ret;
501 }
502
503 #define AV_PKT_FLAG_UNCODED_FRAME 0x2000
504
505
#if FF_API_COMPUTE_PKT_FIELDS2
FF_DISABLE_DEPRECATION_WARNINGS
//FIXME merge with compute_pkt_fields
/**
 * Fill in missing pts/dts on a packet before muxing and validate
 * timestamp monotonicity (deprecated compatibility path).
 *
 * Guesses pts from dts (and vice versa via the pts reorder buffer when
 * the codec has video_delay), synthesizes timestamps from the per-stream
 * fractional pts counter when both are unset, and rejects packets whose
 * dts does not increase or whose pts < dts.
 *
 * @return 0 on success, AVERROR(EINVAL) on invalid timestamps
 */
static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFStream *const sti = ffstream(st);
    int delay = st->codecpar->video_delay;
    int frame_size;

    /* Warn once per context about unset timestamps (attached pictures are
     * exempt unless they are timed thumbnails). */
    if (!fci->missing_ts_warning &&
        !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
        (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
        av_log(s, AV_LOG_WARNING,
               "Timestamps are unset in a packet for stream %d. "
               "This is deprecated and will stop working in the future. "
               "Fix your code to set the timestamps properly\n", st->index);
        fci->missing_ts_warning = 1;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(sti->cur_dts), delay, pkt->size, pkt->stream_index);

    /* No reordering: pts equals dts. */
    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
        static int warned;
        if (!warned) {
            av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
            warned = 1;
        }
        pkt->dts =
        //        pkt->pts= st->cur_dts;
        pkt->pts = sti->priv_pts.val;
    }

    //calculate dts from pts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        /* Keep a sorted window of the last `delay + 1` pts values; the
         * smallest one is the dts of the current packet. */
        sti->pts_buffer[0] = pkt->pts;
        for (int i = 1; i < delay + 1 && sti->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            sti->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (int i = 0; i < delay && sti->pts_buffer[i] > sti->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, sti->pts_buffer[i], sti->pts_buffer[i + 1]);

        pkt->dts = sti->pts_buffer[0];
    }

    /* dts must be strictly increasing, except that equal dts is tolerated
     * for subtitles/data or when the muxer sets AVFMT_TS_NONSTRICT. */
    if (sti->cur_dts && sti->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
          st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
          sti->cur_dts >= pkt->dts) || sti->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
               st->index, av_ts2str(sti->cur_dts), av_ts2str(pkt->dts));
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts (%s) < dts (%s) in stream %d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts),
               st->index);
        return AVERROR(EINVAL);
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts));

    sti->cur_dts      = pkt->dts;
    sti->priv_pts.val = pkt->dts;

    /* update pts */
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
                     (*(AVFrame **)pkt->data)->nb_samples :
                     av_get_audio_frame_duration2(st->codecpar, pkt->size);

        /* HACK/FIXME, we skip the initial 0 size packets as they are most
         * likely equal to the encoder delay, but it would be better if we
         * had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || sti->priv_pts.num != sti->priv_pts.den >> 1 || sti->priv_pts.val)) {
            frac_add(&sti->priv_pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        frac_add(&sti->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
        break;
    }
    return 0;
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif
604
605 static void guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
606 {
607 if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
608 av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
609 pkt->duration, pkt->stream_index);
610 pkt->duration = 0;
611 }
612
613 if (pkt->duration)
614 return;
615
616 switch (st->codecpar->codec_type) {
617 case AVMEDIA_TYPE_VIDEO:
618 if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
619 pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate),
620 st->time_base);
621 } else if (st->time_base.num * 1000LL > st->time_base.den)
622 pkt->duration = 1;
623 break;
624 case AVMEDIA_TYPE_AUDIO: {
625 int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
626 if (frame_size && st->codecpar->sample_rate) {
627 pkt->duration = av_rescale_q(frame_size,
628 (AVRational){1, st->codecpar->sample_rate},
629 st->time_base);
630 }
631 break;
632 }
633 }
634 }
635
/**
 * Shift packet timestamps so they do not go below each stream's
 * lowest_ts_allowed, implementing the avoid_negative_ts option.
 *
 * On the first packet with a usable timestamp the per-stream offsets are
 * computed once (also peeking into the muxing queue for a better minimum)
 * and the status moves to AVOID_NEGATIVE_TS_KNOWN; afterwards only the
 * precomputed offset is applied.
 */
static void handle_avoid_negative_ts(FFFormatContext *si, FFStream *sti,
                                     AVPacket *pkt)
{
    AVFormatContext *const s = &si->pub;
    int64_t offset;

    if (!AVOID_NEGATIVE_TS_ENABLED(si->avoid_negative_ts_status))
        return;

    if (si->avoid_negative_ts_status == AVOID_NEGATIVE_TS_UNKNOWN) {
        int use_pts = si->avoid_negative_ts_use_pts;
        int64_t ts  = use_pts ? pkt->pts : pkt->dts;
        AVRational tb = sti->pub.time_base;

        /* No usable timestamp yet: stay in the UNKNOWN state. */
        if (ts == AV_NOPTS_VALUE)
            return;

        ts -= sti->lowest_ts_allowed;

        /* Peek into the muxing queue to improve our estimate
         * of the lowest timestamp if av_interleaved_write_frame() is used. */
        for (const PacketListEntry *pktl = si->packet_buffer.head;
             pktl; pktl = pktl->next) {
            AVRational cmp_tb = s->streams[pktl->pkt.stream_index]->time_base;
            int64_t cmp_ts = use_pts ? pktl->pkt.pts : pktl->pkt.dts;
            if (cmp_ts == AV_NOPTS_VALUE)
                continue;
            cmp_ts -= ffstream(s->streams[pktl->pkt.stream_index])->lowest_ts_allowed;
            /* Queued packets have not had output_ts_offset applied yet. */
            if (s->output_ts_offset)
                cmp_ts += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, cmp_tb);
            if (av_compare_ts(cmp_ts, cmp_tb, ts, tb) < 0) {
                ts = cmp_ts;
                tb = cmp_tb;
            }
        }

        /* Shift when the minimum is negative, or (for MAKE_ZERO) also when
         * it is positive, so the output starts exactly at zero. */
        if (ts < 0 ||
            ts > 0 && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
            for (unsigned i = 0; i < s->nb_streams; i++) {
                AVStream *const st2  = s->streams[i];
                FFStream *const sti2 = ffstream(st2);
                /* Round up so no stream can land below its floor. */
                sti2->mux_ts_offset = av_rescale_q_rnd(-ts, tb,
                                                       st2->time_base,
                                                       AV_ROUND_UP);
            }
        }
        si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_KNOWN;
    }

    offset = sti->mux_ts_offset;

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += offset;
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += offset;

    /* The one-time offset is only an estimate; warn when a later packet
     * still falls below the stream's floor. */
    if (si->avoid_negative_ts_use_pts) {
        if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < sti->lowest_ts_allowed) {
            av_log(s, AV_LOG_WARNING, "failed to avoid negative "
                   "pts %s in stream %d.\n"
                   "Try -avoid_negative_ts 1 as a possible workaround.\n",
                   av_ts2str(pkt->pts),
                   pkt->stream_index
            );
        }
    } else {
        if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < sti->lowest_ts_allowed) {
            av_log(s, AV_LOG_WARNING,
                   "Packets poorly interleaved, failed to avoid negative "
                   "timestamp %s in stream %d.\n"
                   "Try -max_interleave_delta 0 as a possible workaround.\n",
                   av_ts2str(pkt->dts),
                   pkt->stream_index
            );
        }
    }
}
713
714 /**
715 * Shift timestamps and call muxer; the original pts/dts are not kept.
716 *
717 * FIXME: this function should NEVER get undefined pts/dts beside when the
718 * AVFMT_NOTIMESTAMPS is set.
719 * Those additional safety checks should be dropped once the correct checks
720 * are set in the callers.
721 */
722 static int write_packet(AVFormatContext *s, AVPacket *pkt)
723 {
724 FFFormatContext *const si = ffformatcontext(s);
725 AVStream *const st = s->streams[pkt->stream_index];
726 FFStream *const sti = ffstream(st);
727 int ret;
728
729 // If the timestamp offsetting below is adjusted, adjust
730 // ff_interleaved_peek similarly.
731 if (s->output_ts_offset) {
732 int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
733
734 if (pkt->dts != AV_NOPTS_VALUE)
735 pkt->dts += offset;
736 if (pkt->pts != AV_NOPTS_VALUE)
737 pkt->pts += offset;
738 }
739 handle_avoid_negative_ts(si, sti, pkt);
740
741 if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
742 AVFrame **frame = (AVFrame **)pkt->data;
743 av_assert0(pkt->size == sizeof(*frame));
744 ret = ffofmt(s->oformat)->write_uncoded_frame(s, pkt->stream_index, frame, 0);
745 } else {
746 ret = ffofmt(s->oformat)->write_packet(s, pkt);
747 }
748
749 if (s->pb && ret >= 0) {
750 flush_if_needed(s);
751 if (s->pb->error < 0)
752 ret = s->pb->error;
753 }
754
755 if (ret >= 0)
756 st->nb_frames++;
757
758 return ret;
759 }
760
761 static int check_packet(AVFormatContext *s, AVPacket *pkt)
762 {
763 if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
764 av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
765 pkt->stream_index);
766 return AVERROR(EINVAL);
767 }
768
769 if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
770 av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
771 return AVERROR(EINVAL);
772 }
773
774 return 0;
775 }
776
/**
 * Sanitize a user-supplied packet before it enters the muxing pipeline:
 * validate/complete timestamps (only when FF_API_COMPUTE_PKT_FIELDS2 is
 * disabled), force the key-frame flag for intra-only codecs, and make
 * empty packets refcounted so the BSF layer does not treat them as EOS.
 *
 * @return 0 on success, a negative AVERROR code on invalid timestamps
 *         or allocation failure
 */
static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    FFStream *const sti = ffstream(st);
#if !FF_API_COMPUTE_PKT_FIELDS2
    /* sanitize the timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {

        /* when there is no reordering (so dts is equal to pts), but
         * only one of them is set, set the other as well */
        if (!sti->reorder) {
            if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
                pkt->dts = pkt->pts;
        }

        /* check that the timestamps are set */
        if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR,
                   "Timestamps are unset in a packet for stream %d\n", st->index);
            return AVERROR(EINVAL);
        }

        /* check that the dts are increasing (or at least non-decreasing,
         * if the format allows it */
        if (sti->cur_dts != AV_NOPTS_VALUE &&
            ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && sti->cur_dts >= pkt->dts) ||
             sti->cur_dts > pkt->dts)) {
            av_log(s, AV_LOG_ERROR,
                   "Application provided invalid, non monotonically increasing "
                   "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
                   st->index, sti->cur_dts, pkt->dts);
            return AVERROR(EINVAL);
        }

        if (pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
                   pkt->pts, pkt->dts, st->index);
            return AVERROR(EINVAL);
        }
    }
#endif
    /* update flags */
    if (sti->is_intra_only)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (!pkt->data && !pkt->side_data_elems) {
        /* Such empty packets signal EOS for the BSF API; so sanitize
         * the packet by allocating data of size 0 (+ padding). */
        av_buffer_unref(&pkt->buf);
        return av_packet_make_refcounted(pkt);
    }

    return 0;
}
832
833 #define CHUNK_START 0x1000
834
/**
 * Add a packet to the muxing queue at the position chosen by the
 * comparator, keeping the per-stream chunking state up to date.
 *
 * The packet's ownership moves into the queue; on error the packet is
 * unreferenced. Insertion starts at the stream's last queued entry (the
 * queue is already sorted up to that point) and scans forward.
 *
 * @param compare returns nonzero when its second packet should be muxed
 *                before its first
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, const AVPacket *, const AVPacket *))
{
    int ret;
    FFFormatContext *const si = ffformatcontext(s);
    PacketListEntry **next_point, *this_pktl;
    AVStream *st = s->streams[pkt->stream_index];
    FFStream *const sti = ffstream(st);
    int chunked = s->max_chunk_size || s->max_chunk_duration;

    this_pktl = av_malloc(sizeof(*this_pktl));
    if (!this_pktl) {
        av_packet_unref(pkt);
        return AVERROR(ENOMEM);
    }
    /* The queue entry must own its data. */
    if ((ret = av_packet_make_refcounted(pkt)) < 0) {
        av_free(this_pktl);
        av_packet_unref(pkt);
        return ret;
    }

    av_packet_move_ref(&this_pktl->pkt, pkt);
    pkt = &this_pktl->pkt;

    /* Start scanning after this stream's last queued packet; everything
     * before it is already in order relative to this stream. */
    if (sti->last_in_packet_buffer) {
        next_point = &(sti->last_in_packet_buffer->next);
    } else {
        next_point = &si->packet_buffer.head;
    }

    if (chunked) {
        /* Accumulate size/duration and mark the packet that starts a new
         * chunk once either limit is exceeded. */
        uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
        sti->interleaver_chunk_size     += pkt->size;
        sti->interleaver_chunk_duration += pkt->duration;
        if (   (s->max_chunk_size && sti->interleaver_chunk_size > s->max_chunk_size)
            || (max && sti->interleaver_chunk_duration > max)) {
            sti->interleaver_chunk_size = 0;
            pkt->flags |= CHUNK_START;
            if (max && sti->interleaver_chunk_duration > max) {
                /* Re-align the chunk boundary to a multiple of max,
                 * offset by half a chunk for video streams. */
                int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
                int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;

                sti->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
            } else
                sti->interleaver_chunk_duration = 0;
        }
    }
    if (*next_point) {
        /* Within a chunk, packets stay in arrival order. */
        if (chunked && !(pkt->flags & CHUNK_START))
            goto next_non_null;

        /* Fast path: if the packet belongs after the current tail, append. */
        if (compare(s, &si->packet_buffer.tail->pkt, pkt)) {
            while (   *next_point
                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
                       || !compare(s, &(*next_point)->pkt, pkt)))
                next_point = &(*next_point)->next;
            if (*next_point)
                goto next_non_null;
        } else {
            next_point = &(si->packet_buffer.tail->next);
        }
    }
    av_assert1(!*next_point);

    /* Appending at the end: update the tail pointer. */
    si->packet_buffer.tail = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    sti->last_in_packet_buffer = *next_point = this_pktl;

    return 0;
}
908
/**
 * Comparator for dts-ordered interleaving.
 *
 * With audio_preload set, audio packets are compared as if their dts were
 * audio_preload microseconds earlier; exact ties are broken with an
 * exact integer cross-multiplication to avoid rescaling rounding.
 *
 * @return 1 when `next` should be muxed after `pkt` (ties broken by
 *         stream index), 0 otherwise
 */
static int interleave_compare_dts(AVFormatContext *s, const AVPacket *next,
                                  const AVPacket *pkt)
{
    AVStream *st  = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                             st->time_base);
    if (s->audio_preload) {
        int preload  = st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
        int preload2 = st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
        /* Preload only matters when exactly one of the two is audio. */
        if (preload != preload2) {
            int64_t ts, ts2;
            preload  *= s->audio_preload;
            preload2 *= s->audio_preload;
            ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - preload;
            ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - preload2;
            if (ts == ts2) {
                /* Tie after rescaling: redo the comparison without rounding
                 * by cross-multiplying with both timebases. */
                ts = ((uint64_t)pkt ->dts*st ->time_base.num*AV_TIME_BASE - (uint64_t)preload *st ->time_base.den)*st2->time_base.den
                   - ((uint64_t)next->dts*st2->time_base.num*AV_TIME_BASE - (uint64_t)preload2*st2->time_base.den)*st ->time_base.den;
                ts2 = 0;
            }
            comp = (ts2 > ts) - (ts2 < ts);
        }
    }

    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}
938
/**
 * Default interleaving callback: queue the incoming packet (if any) in
 * dts order and output the head of the queue once it is safe to do so.
 *
 * Output happens when every interleaved stream has a queued packet, when
 * flushing is requested, or when the queue spans more than
 * max_interleave_delta microseconds.
 *
 * @return 1 with a packet in *pkt, 0 when nothing can be output yet,
 *         a negative AVERROR code on failure
 */
int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *pkt,
                                 int flush, int has_packet)
{
    FormatContextInternal *const fci = ff_fc_internal(s);
    FFFormatContext *const si = &fci->fc;
    int stream_count = 0;
    int noninterleaved_count = 0;
    int ret;

    if (has_packet) {
        if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
            return ret;
    }

    /* Count streams that have a queued packet, and those that never will
     * (attachments, sparse codecs exempt from interleaving). */
    for (unsigned i = 0; i < s->nb_streams; i++) {
        const AVStream *const st = s->streams[i];
        const FFStream *const sti = cffstream(st);
        const AVCodecParameters *const par = st->codecpar;
        if (sti->last_in_packet_buffer) {
            ++stream_count;
        } else if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
                   par->codec_id != AV_CODEC_ID_VP8 &&
                   par->codec_id != AV_CODEC_ID_VP9 &&
                   par->codec_id != AV_CODEC_ID_SMPTE_2038) {
            ++noninterleaved_count;
        }
    }

    /* Every interleaved stream has data queued: safe to output. */
    if (fci->nb_interleaved_streams == stream_count)
        flush = 1;

    /* All remaining streams without data are non-interleaved; force output
     * if the queue already spans more than max_interleave_delta. */
    if (s->max_interleave_delta > 0 &&
        si->packet_buffer.head &&
        si->packet_buffer.head->pkt.dts != AV_NOPTS_VALUE &&
        !flush &&
        fci->nb_interleaved_streams == stream_count+noninterleaved_count
    ) {
        AVPacket *const top_pkt = &si->packet_buffer.head->pkt;
        int64_t delta_dts = INT64_MIN;
        int64_t top_dts = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);

        for (unsigned i = 0; i < s->nb_streams; i++) {
            const AVStream *const st = s->streams[i];
            const FFStream *const sti = cffstream(st);
            const PacketListEntry *const last = sti->last_in_packet_buffer;
            int64_t last_dts;

            if (!last || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                continue;

            last_dts = av_rescale_q(last->pkt.dts,
                                    st->time_base,
                                    AV_TIME_BASE_Q);
            delta_dts = FFMAX(delta_dts, last_dts - top_dts);
        }

        if (delta_dts > s->max_interleave_delta) {
            av_log(s, AV_LOG_DEBUG,
                   "Delay between the first packet and last packet in the "
                   "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
                   delta_dts, s->max_interleave_delta);
            flush = 1;
        }
    }

    if (stream_count && flush) {
        /* Pop the queue head and clear the owning stream's tail marker
         * if it pointed at this entry. */
        PacketListEntry *pktl = si->packet_buffer.head;
        AVStream *const st = s->streams[pktl->pkt.stream_index];
        FFStream *const sti = ffstream(st);

        if (sti->last_in_packet_buffer == pktl)
            sti->last_in_packet_buffer = NULL;
        avpriv_packet_list_get(&si->packet_buffer, pkt);

        return 1;
    } else {
        return 0;
    }
}
1020
/*
 * Interleave callback that performs no reordering at all: the incoming
 * packet (if any) is passed straight through to the muxer. Returns 1 if
 * a packet is available in pkt, 0 otherwise.
 */
int ff_interleave_packet_passthrough(AVFormatContext *s, AVPacket *pkt,
                                     int flush, int has_packet)
{
    return has_packet;
}
1026
1027 int ff_get_muxer_ts_offset(AVFormatContext *s, int stream_index, int64_t *offset)
1028 {
1029 AVStream *st;
1030
1031 if (stream_index < 0 || stream_index >= s->nb_streams)
1032 return AVERROR(EINVAL);
1033
1034 st = s->streams[stream_index];
1035 *offset = ffstream(st)->mux_ts_offset;
1036
1037 if (s->output_ts_offset)
1038 *offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
1039
1040 return 0;
1041 }
1042
1043 const AVPacket *ff_interleaved_peek(AVFormatContext *s, int stream)
1044 {
1045 FFFormatContext *const si = ffformatcontext(s);
1046 PacketListEntry *pktl = si->packet_buffer.head;
1047 while (pktl) {
1048 if (pktl->pkt.stream_index == stream) {
1049 return &pktl->pkt;
1050 }
1051 pktl = pktl->next;
1052 }
1053 return NULL;
1054 }
1055
1056 static int check_bitstream(AVFormatContext *s, FFStream *sti, AVPacket *pkt)
1057 {
1058 int ret;
1059
1060 if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
1061 return 1;
1062
1063 if (ffofmt(s->oformat)->check_bitstream) {
1064 if (!sti->bitstream_checked) {
1065 if ((ret = ffofmt(s->oformat)->check_bitstream(s, &sti->pub, pkt)) < 0)
1066 return ret;
1067 else if (ret == 1)
1068 sti->bitstream_checked = 1;
1069 }
1070 }
1071
1072 return 1;
1073 }
1074
1075 static int interleaved_write_packet(AVFormatContext *s, AVPacket *pkt,
1076 int flush, int has_packet)
1077 {
1078 FormatContextInternal *const fci = ff_fc_internal(s);
1079
1080 for (;; ) {
1081 int ret = fci->interleave_packet(s, pkt, flush, has_packet);
1082 if (ret <= 0)
1083 return ret;
1084
1085 has_packet = 0;
1086
1087 ret = write_packet(s, pkt);
1088 av_packet_unref(pkt);
1089 if (ret < 0)
1090 return ret;
1091 }
1092 }
1093
1094 static int write_packet_common(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
1095 {
1096 int ret;
1097
1098 if (s->debug & FF_FDEBUG_TS)
1099 av_log(s, AV_LOG_DEBUG, "%s size:%d dts:%s pts:%s\n", __func__,
1100 pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
1101
1102 guess_pkt_duration(s, st, pkt);
1103
1104 #if FF_API_COMPUTE_PKT_FIELDS2
1105 if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
1106 return ret;
1107 #endif
1108
1109 if (interleaved) {
1110 if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
1111 return AVERROR(EINVAL);
1112 return interleaved_write_packet(s, pkt, 0, 1);
1113 } else {
1114 return write_packet(s, pkt);
1115 }
1116 }
1117
/*
 * Push pkt through the stream's bitstream filter and write every packet
 * the filter produces. Called with a blank pkt (or one that hit EOF
 * upstream) to drain the filter at the end of muxing.
 *
 * Returns 0 when the filter needs more input or is fully drained,
 * otherwise a negative error code.
 */
static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
{
    FFStream *const sti = ffstream(st);
    AVBSFContext *const bsfc = sti->bsfc;
    int ret;

    if ((ret = av_bsf_send_packet(bsfc, pkt)) < 0) {
        av_log(s, AV_LOG_ERROR,
               "Failed to send packet to filter %s for stream %d\n",
               bsfc->filter->name, st->index);
        return ret;
    }

    do {
        ret = av_bsf_receive_packet(bsfc, pkt);
        if (ret < 0) {
            /* EAGAIN/EOF are normal termination: filter wants more input
             * or has nothing left to emit. */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;
            av_log(s, AV_LOG_ERROR, "Error applying bitstream filters to an output "
                   "packet for stream #%d: %s\n", st->index, av_err2str(ret));
            /* Unless the caller asked for strict error checking, drop the
             * bad packet and keep filtering; ENOMEM is always fatal. */
            if (!(s->error_recognition & AV_EF_EXPLODE) && ret != AVERROR(ENOMEM))
                continue;
            return ret;
        }
        /* Filter output uses the filter's time base; convert back. */
        av_packet_rescale_ts(pkt, bsfc->time_base_out, st->time_base);
        ret = write_packet_common(s, st, pkt, interleaved);
        if (ret >= 0 && !interleaved) // a successful write_packet_common already unrefed pkt for interleaved
            av_packet_unref(pkt);
    } while (ret >= 0);

    return ret;
}
1150
1151 static int write_packets_common(AVFormatContext *s, AVPacket *pkt, int interleaved)
1152 {
1153 AVStream *st;
1154 FFStream *sti;
1155 int ret = check_packet(s, pkt);
1156 if (ret < 0)
1157 return ret;
1158 st = s->streams[pkt->stream_index];
1159 sti = ffstream(st);
1160
1161 ret = prepare_input_packet(s, st, pkt);
1162 if (ret < 0)
1163 return ret;
1164
1165 ret = check_bitstream(s, sti, pkt);
1166 if (ret < 0)
1167 return ret;
1168
1169 if (sti->bsfc) {
1170 return write_packets_from_bsfs(s, st, pkt, interleaved);
1171 } else {
1172 return write_packet_common(s, st, pkt, interleaved);
1173 }
1174 }
1175
int av_write_frame(AVFormatContext *s, AVPacket *in)
{
    FFFormatContext *const si = ffformatcontext(s);
    AVPacket *pkt = si->parse_pkt;
    int ret;

    /* A NULL packet is a flush request; only honored if the muxer opted
     * in via FF_OFMT_FLAG_ALLOW_FLUSH. Returns 1 when there is nothing
     * to flush (per the public API contract). */
    if (!in) {
        if (ffofmt(s->oformat)->flags_internal & FF_OFMT_FLAG_ALLOW_FLUSH) {
            ret = ffofmt(s->oformat)->write_packet(s, NULL);
            flush_if_needed(s);
            /* Surface deferred I/O errors from the AVIOContext. */
            if (ret >= 0 && s->pb && s->pb->error < 0)
                ret = s->pb->error;
            return ret;
        }
        return 1;
    }

    if (in->flags & AV_PKT_FLAG_UNCODED_FRAME) {
        /* Uncoded frames are consumed (and unrefed below) directly. */
        pkt = in;
    } else {
        /* We don't own in, so we have to make sure not to modify it.
         * (ff_write_chained() relies on this fact.)
         * The following avoids copying in's data unnecessarily.
         * Copying side data is unavoidable as a bitstream filter
         * may change it, e.g. free it on errors. */
        pkt->data = in->data;
        pkt->size = in->size;
        ret = av_packet_copy_props(pkt, in);
        if (ret < 0)
            return ret;
        if (in->buf) {
            pkt->buf = av_buffer_ref(in->buf);
            if (!pkt->buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = write_packets_common(s, pkt, 0/*non-interleaved*/);

fail:
    // Uncoded frames using the noninterleaved codepath are also freed here
    av_packet_unref(pkt);
    return ret;
}
1222
1223 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
1224 {
1225 int ret;
1226
1227 if (pkt) {
1228 ret = write_packets_common(s, pkt, 1/*interleaved*/);
1229 if (ret < 0)
1230 av_packet_unref(pkt);
1231 return ret;
1232 } else {
1233 av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
1234 return interleaved_write_packet(s, ffformatcontext(s)->parse_pkt, 1/*flush*/, 0);
1235 }
1236 }
1237
int av_write_trailer(AVFormatContext *s)
{
    FFFormatContext *const si = ffformatcontext(s);
    AVPacket *const pkt = si->parse_pkt;
    int ret1, ret = 0;

    /* Drain every per-stream bitstream filter. The first error is kept
     * in ret, but draining continues for the remaining streams. */
    for (unsigned i = 0; i < s->nb_streams; i++) {
        AVStream *const st = s->streams[i];
        FFStream *const sti = ffstream(st);
        if (sti->bsfc) {
            ret1 = write_packets_from_bsfs(s, st, pkt, 1/*interleaved*/);
            if (ret1 < 0)
                av_packet_unref(pkt);
            if (ret >= 0)
                ret = ret1;
        }
    }
    /* Flush whatever is still sitting in the interleaving queue. */
    ret1 = interleaved_write_packet(s, pkt, 1, 0);
    if (ret >= 0)
        ret = ret1;

    /* Let the muxer write its trailer, even if earlier steps failed. */
    if (ffofmt(s->oformat)->write_trailer) {
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
        ret1 = ffofmt(s->oformat)->write_trailer(s);
        if (ret >= 0)
            ret = ret1;
    }

    deinit_muxer(s);

    /* Flush the I/O context and surface any pending I/O error. */
    if (s->pb)
        avio_flush(s->pb);
    if (ret == 0)
        ret = s->pb ? s->pb->error : 0;
    /* Release per-stream muxer state; the streams themselves are freed
     * later by avformat_free_context(). */
    for (unsigned i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&ffstream(s->streams[i])->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    av_packet_unref(si->pkt);
    return ret;
}
1283
1284 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
1285 int64_t *dts, int64_t *wall)
1286 {
1287 const FFOutputFormat *const of = ffofmt(s->oformat);
1288 if (!of || !of->get_output_timestamp)
1289 return AVERROR(ENOSYS);
1290 of->get_output_timestamp(s, stream, dts, wall);
1291 return 0;
1292 }
1293
1294 int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args)
1295 {
1296 int ret;
1297 const AVBitStreamFilter *bsf;
1298 FFStream *const sti = ffstream(st);
1299 AVBSFContext *bsfc;
1300
1301 av_assert0(!sti->bsfc);
1302
1303 if (!(bsf = av_bsf_get_by_name(name))) {
1304 av_log(NULL, AV_LOG_ERROR, "Unknown bitstream filter '%s'\n", name);
1305 return AVERROR_BSF_NOT_FOUND;
1306 }
1307
1308 if ((ret = av_bsf_alloc(bsf, &bsfc)) < 0)
1309 return ret;
1310
1311 bsfc->time_base_in = st->time_base;
1312 if ((ret = avcodec_parameters_copy(bsfc->par_in, st->codecpar)) < 0) {
1313 av_bsf_free(&bsfc);
1314 return ret;
1315 }
1316
1317 if (args && bsfc->filter->priv_class) {
1318 if ((ret = av_set_options_string(bsfc->priv_data, args, "=", ":")) < 0) {
1319 av_bsf_free(&bsfc);
1320 return ret;
1321 }
1322 }
1323
1324 if ((ret = av_bsf_init(bsfc)) < 0) {
1325 av_bsf_free(&bsfc);
1326 return ret;
1327 }
1328
1329 sti->bsfc = bsfc;
1330
1331 av_log(NULL, AV_LOG_VERBOSE,
1332 "Automatically inserted bitstream filter '%s'; args='%s'\n",
1333 name, args ? args : "");
1334 return 1;
1335 }
1336
/*
 * Write a packet originating from src's stream `stream_index` into dst's
 * stream `dst_stream`, rescaling its timestamps between the two streams'
 * time bases. With interleave=0 the packet's caller-visible fields are
 * restored afterwards; with interleave!=0 ownership passes to
 * av_interleaved_write_frame() and no restore is possible.
 */
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src, int interleave)
{
    /* Back up the fields we are about to modify. */
    int64_t pts = pkt->pts, dts = pkt->dts, duration = pkt->duration;
    int stream_index = pkt->stream_index;
    AVRational time_base = pkt->time_base;
    int ret;

    pkt->stream_index = dst_stream;

    av_packet_rescale_ts(pkt,
                         src->streams[stream_index]->time_base,
                         dst->streams[dst_stream]->time_base);

    if (!interleave) {
        ret = av_write_frame(dst, pkt);
        /* We only have to backup and restore the fields that
         * we changed ourselves, because av_write_frame() does not
         * modify the packet given to it. */
        pkt->pts = pts;
        pkt->dts = dts;
        pkt->duration = duration;
        pkt->stream_index = stream_index;
        pkt->time_base = time_base;
    } else
        ret = av_interleaved_write_frame(dst, pkt);

    return ret;
}
1366
/*
 * AVBuffer free callback for uncoded-frame packets: `data` holds an
 * AVFrame* (see write_uncoded_frame_internal()). Free the frame it
 * points to first, then the holder allocation itself.
 */
static void uncoded_frame_free(void *unused, uint8_t *data)
{
    av_frame_free((AVFrame **)data);
    av_free(data);
}
1372
/*
 * Wrap an AVFrame into a packet whose payload is the AVFrame pointer
 * itself (flagged AV_PKT_FLAG_UNCODED_FRAME) and send it down the
 * normal (interleaved or direct) write path. Takes ownership of frame;
 * a NULL frame is forwarded as a NULL packet (flush).
 */
static int write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
                                        AVFrame *frame, int interleaved)
{
    FFFormatContext *const si = ffformatcontext(s);
    AVPacket *pkt = si->parse_pkt;

    av_assert0(s->oformat);
    if (!ffofmt(s->oformat)->write_uncoded_frame) {
        av_frame_free(&frame);
        return AVERROR(ENOSYS);
    }

    if (!frame) {
        pkt = NULL;
    } else {
        /* The packet's data is a padded buffer holding just the frame
         * pointer; sizeof(frame) is intentionally pointer-sized. */
        size_t bufsize = sizeof(frame) + AV_INPUT_BUFFER_PADDING_SIZE;
        AVFrame **framep = av_mallocz(bufsize);

        if (!framep)
            goto fail;
        /* uncoded_frame_free() frees both the frame and the holder. */
        pkt->buf = av_buffer_create((void *)framep, bufsize,
                                    uncoded_frame_free, NULL, 0);
        if (!pkt->buf) {
            av_free(framep);
    fail:
            /* shared failure path: the frame is owned by us and must go */
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        *framep = frame;

        pkt->data = (void *)framep;
        pkt->size = sizeof(frame);
        pkt->pts =
        pkt->dts = frame->pts;
        pkt->duration = frame->duration;
        pkt->stream_index = stream_index;
        pkt->flags |= AV_PKT_FLAG_UNCODED_FRAME;
    }

    return interleaved ? av_interleaved_write_frame(s, pkt) :
                         av_write_frame(s, pkt);
}
1415
/* Public wrapper: write an uncoded frame without interleaving. */
int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
                           AVFrame *frame)
{
    return write_uncoded_frame_internal(s, stream_index, frame, 0);
}
1421
/* Public wrapper: write an uncoded frame through the interleaving queue. */
int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
                                       AVFrame *frame)
{
    return write_uncoded_frame_internal(s, stream_index, frame, 1);
}
1427
1428 int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
1429 {
1430 const FFOutputFormat *const of = ffofmt(s->oformat);
1431 av_assert0(of);
1432 if (!of->write_uncoded_frame)
1433 return AVERROR(ENOSYS);
1434 return of->write_uncoded_frame(s, stream_index, NULL,
1435 AV_WRITE_UNCODED_FRAME_QUERY);
1436 }