/* fftools/ffmpeg_demux.c — demuxer thread and input-stream handling */
1 /*
2 * This file is part of FFmpeg.
3 *
4 * FFmpeg is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU Lesser General Public
6 * License as published by the Free Software Foundation; either
7 * version 2.1 of the License, or (at your option) any later version.
8 *
9 * FFmpeg is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * Lesser General Public License for more details.
13 *
14 * You should have received a copy of the GNU Lesser General Public
15 * License along with FFmpeg; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18
19 #include <float.h>
20 #include <stdint.h>
21
22 #include "ffmpeg.h"
23 #include "ffmpeg_sched.h"
24 #include "ffmpeg_utils.h"
25
26 #include "libavutil/avassert.h"
27 #include "libavutil/avstring.h"
28 #include "libavutil/display.h"
29 #include "libavutil/error.h"
30 #include "libavutil/intreadwrite.h"
31 #include "libavutil/mem.h"
32 #include "libavutil/opt.h"
33 #include "libavutil/parseutils.h"
34 #include "libavutil/pixdesc.h"
35 #include "libavutil/time.h"
36 #include "libavutil/timestamp.h"
37
38 #include "libavcodec/bsf.h"
39 #include "libavcodec/packet.h"
40
41 #include "libavformat/avformat.h"
42
typedef struct DemuxStream {
    // parent-class struct; must be the first member so that an
    // InputStream* can be cast to DemuxStream* (see ds_from_ist())
    InputStream ist;

    // name used for logging
    char log_name[32];

    // scheduler index of this demuxed stream's output
    int sch_idx_stream;
    // scheduler index of the decoder fed by this stream, if any
    int sch_idx_dec;

    // scale factor applied to this stream's packet timestamps
    double ts_scale;

    /* non zero if the packets must be decoded;
     * a bitmask of the DECODING_FOR_* values below */
    int decoding_needed;
#define DECODING_FOR_OST    1
#define DECODING_FOR_FILTER 2

    /* true if stream data should be discarded */
    int discard;

    // scheduler returned EOF for this stream
    int finished;

    // non-zero if at least one consumer uses this stream via streamcopy
    int streamcopy_needed;
    // non-zero if this (subtitle) stream feeds a sub2video filtergraph
    // and so needs heartbeat packets (see demux_send())
    int have_sub2video;
    int reinit_filters;
    int autorotate;
    int apply_cropping;
    int force_display_matrix;
    int drop_changed;


    // set once an initial timestamp wrap-around correction was attempted
    int wrap_correction_done;
    // set once the first timestamp for this stream was seen,
    // i.e. first_dts below is valid
    int saw_first_ts;
    /// dts of the first packet read for this stream (in AV_TIME_BASE units)
    int64_t first_dts;

    /* predicted dts of the next packet read for this stream or (when there are
     * several frames in a packet) of the next frame in current packet (in AV_TIME_BASE units) */
    int64_t next_dts;
    /// dts of the last packet read for this stream (in AV_TIME_BASE units)
    int64_t dts;

    // codec descriptor, used to query codec properties (e.g. field coding)
    const AVCodecDescriptor *codec_desc;

    // options passed to the decoder when it is opened
    AVDictionary *decoder_opts;
    DecoderOpts dec_opts;
    // label under which the decoder is identified: "<file idx>:<stream idx>"
    char dec_name[16];
    // decoded media properties, as estimated by opening the decoder
    AVFrame *decoded_params;

    // optional bitstream filter applied to demuxed packets
    AVBSFContext *bsf;

    /* number of packets successfully read for this stream */
    uint64_t nb_packets;
    // combined size of all the packets read
    uint64_t data_size;
    // latest wallclock time at which packet reading resumed after a stall - used for readrate
    int64_t resume_wc;
    // timestamp of first packet sent after the latest stall - used for readrate
    int64_t resume_pts;
    // measure of how far behind packet reading is against specified readrate
    int64_t lag;
} DemuxStream;
106
// Demuxer-private state for an input stream group.
typedef struct DemuxStreamGroup {
    // parent-class struct; placed first, analogous to DemuxStream.ist
    InputStreamGroup istg;

    // name used for logging
    char log_name[32];
} DemuxStreamGroup;
113
typedef struct Demuxer {
    // parent-class struct; must be the first member so that an
    // InputFile* can be cast to Demuxer* (see demuxer_from_ifile())
    InputFile f;

    // name used for logging
    char log_name[32];

    // wallclock time (av_gettime_relative()) at which reading started
    int64_t wallclock_start;

    /**
     * Extra timestamp offset added by discontinuity handling.
     */
    int64_t ts_offset_discont;
    // dts of the last packet read, in AV_TIME_BASE units;
    // used for inter-stream discontinuity detection
    int64_t last_ts;

    // streamcopy EOF is signalled once a stream's dts reaches this value
    // (see input_packet_process())
    int64_t recording_time;
    int accurate_seek;

    /* number of times input stream should be looped */
    int loop;
    // true if at least one stream of this file is fed to an audio decoder
    int have_audio_dec;
    /* duration of the looped segment of the input file */
    Timestamp duration;
    /* pts with the smallest/largest values ever seen */
    Timestamp min_pts;
    Timestamp max_pts;

    /* number of streams that the user was warned of */
    int nb_streams_warn;

    // rate at which to read the input, as a multiple of realtime;
    // 0 means unlimited (see readrate_sleep())
    float readrate;
    double readrate_initial_burst;
    float readrate_catchup;

    Scheduler *sch;

    // scratch packet used for sending sub2video heartbeats
    AVPacket *pkt_heartbeat;

    // set once the demuxer thread has started reading
    int read_started;
    // number of streams of this file used by some consumer
    int nb_streams_used;
    // number of used streams for which the scheduler signalled EOF
    int nb_streams_finished;
} Demuxer;
155
// Per-thread scratch state of the demuxer thread (see input_thread()).
typedef struct DemuxThreadContext {
    // packet used for reading from the demuxer
    AVPacket *pkt_demux;
    // packet for reading from BSFs
    AVPacket *pkt_bsf;
} DemuxThreadContext;
162
// Downcast: every InputStream in this file is embedded as the first
// member of a DemuxStream, so the pointer cast is valid.
static DemuxStream *ds_from_ist(InputStream *ist)
{
    return (DemuxStream*)ist;
}
167
// Downcast: every InputFile in this file is embedded as the first
// member of a Demuxer, so the pointer cast is valid.
static Demuxer *demuxer_from_ifile(InputFile *f)
{
    return (Demuxer*)f;
}
172
173 InputStream *ist_find_unused(enum AVMediaType type)
174 {
175 for (InputStream *ist = ist_iter(NULL); ist; ist = ist_iter(ist)) {
176 DemuxStream *ds = ds_from_ist(ist);
177 if (ist->par->codec_type == type && ds->discard &&
178 ist->user_set_discard != AVDISCARD_ALL)
179 return ist;
180 }
181 return NULL;
182 }
183
184 static void report_new_stream(Demuxer *d, const AVPacket *pkt)
185 {
186 const AVStream *st = d->f.ctx->streams[pkt->stream_index];
187
188 if (pkt->stream_index < d->nb_streams_warn)
189 return;
190 av_log(d, AV_LOG_WARNING,
191 "New %s stream with index %d at pos:%"PRId64" and DTS:%ss\n",
192 av_get_media_type_string(st->codecpar->codec_type),
193 pkt->stream_index, pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
194 d->nb_streams_warn = pkt->stream_index + 1;
195 }
196
/* Seek back to the start of the input, for -stream_loop. Also updates the
 * estimated duration of the looped segment from the extreme pts values seen
 * so far, and decrements the remaining loop count. */
static int seek_to_start(Demuxer *d, Timestamp end_pts)
{
    InputFile *ifile = &d->f;
    AVFormatContext *is = ifile->ctx;
    int ret;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    // let an explicit end timestamp (if any) extend the observed maximum pts
    if (end_pts.ts != AV_NOPTS_VALUE &&
        (d->max_pts.ts == AV_NOPTS_VALUE ||
         av_compare_ts(d->max_pts.ts, d->max_pts.tb, end_pts.ts, end_pts.tb) < 0))
        d->max_pts = end_pts;

    // duration of one loop iteration = max pts - min pts (min defaults to 0)
    if (d->max_pts.ts != AV_NOPTS_VALUE) {
        int64_t min_pts = d->min_pts.ts == AV_NOPTS_VALUE ? 0 : d->min_pts.ts;
        d->duration.ts = d->max_pts.ts - av_rescale_q(min_pts, d->min_pts.tb, d->max_pts.tb);
    }
    d->duration.tb = d->max_pts.tb;

    // negative loop counts mean "loop forever" and are left untouched
    if (d->loop > 0)
        d->loop--;

    return ret;
}
223
/* Detect timestamp discontinuities in the incoming packet against the
 * stream's predicted next dts and update the file-wide discontinuity
 * offset (d->ts_offset_discont) and/or fix the packet's timestamps.
 * Only called for audio/video packets that carry a dts. */
static void ts_discontinuity_detect(Demuxer *d, InputStream *ist,
                                    AVPacket *pkt)
{
    InputFile *ifile = &d->f;
    DemuxStream *ds = ds_from_ist(ist);
    const int fmt_is_discont = ifile->ctx->iformat->flags & AVFMT_TS_DISCONT;
    int disable_discontinuity_correction = copy_ts;
    int64_t pkt_dts = av_rescale_q_rnd(pkt->dts, pkt->time_base, AV_TIME_BASE_Q,
                                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    // with -copy_ts, still correct when the dts looks like a timestamp
    // wrap-around (the wrapped dts is much closer to the prediction)
    if (copy_ts && ds->next_dts != AV_NOPTS_VALUE &&
        fmt_is_discont && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            pkt->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ds->next_dts) < FFABS(pkt_dts - ds->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    if (ds->next_dts != AV_NOPTS_VALUE && !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ds->next_dts;
        if (fmt_is_discont) {
            // format may legitimately contain discontinuities: absorb the
            // jump into the file-wide offset and shift this packet back
            if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < ds->dts) {
                d->ts_offset_discont -= delta;
                av_log(ist, AV_LOG_WARNING,
                       "timestamp discontinuity "
                       "(stream id=%d): %"PRId64", new offset= %"PRId64"\n",
                       ist->st->id, delta, d->ts_offset_discont);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
            }
        } else {
            // format should be continuous: treat large jumps as invalid
            // timestamps and drop them rather than offsetting
            if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                av_log(ist, AV_LOG_WARNING,
                       "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n",
                       pkt->dts, ds->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ds->next_dts;
                if (FFABS(delta) > 1LL * dts_error_threshold * AV_TIME_BASE) {
                    av_log(ist, AV_LOG_WARNING,
                           "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n",
                           pkt->pts, ds->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    } else if (ds->next_dts == AV_NOPTS_VALUE && !copy_ts &&
               fmt_is_discont && d->last_ts != AV_NOPTS_VALUE) {
        // no per-stream prediction yet: compare against the last dts seen
        // on any stream of this file
        int64_t delta = pkt_dts - d->last_ts;
        if (FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE) {
            d->ts_offset_discont -= delta;
            av_log(ist, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, d->ts_offset_discont);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, pkt->time_base);
        }
    }

    d->last_ts = av_rescale_q(pkt->dts, pkt->time_base, AV_TIME_BASE_Q);
}
291
292 static void ts_discontinuity_process(Demuxer *d, InputStream *ist,
293 AVPacket *pkt)
294 {
295 int64_t offset = av_rescale_q(d->ts_offset_discont, AV_TIME_BASE_Q,
296 pkt->time_base);
297
298 // apply previously-detected timestamp-discontinuity offset
299 // (to all streams, not just audio/video)
300 if (pkt->dts != AV_NOPTS_VALUE)
301 pkt->dts += offset;
302 if (pkt->pts != AV_NOPTS_VALUE)
303 pkt->pts += offset;
304
305 // detect timestamp discontinuities for audio/video
306 if ((ist->par->codec_type == AVMEDIA_TYPE_VIDEO ||
307 ist->par->codec_type == AVMEDIA_TYPE_AUDIO) &&
308 pkt->dts != AV_NOPTS_VALUE)
309 ts_discontinuity_detect(d, ist, pkt);
310 }
311
/* Update the stream's current dts estimate (ds->dts) and the prediction
 * for the next packet's dts (ds->next_dts) from the given packet, and
 * record the estimate in fd->dts_est for downstream consumers.
 * All stored values are in AV_TIME_BASE units. */
static int ist_dts_update(DemuxStream *ds, AVPacket *pkt, FrameData *fd)
{
    InputStream *ist = &ds->ist;
    const AVCodecParameters *par = ist->par;

    if (!ds->saw_first_ts) {
        // initialize from the first packet: start video_delay frames early
        // (if the frame rate is known), offset by the packet's pts if set
        ds->first_dts =
        ds->dts = ist->st->avg_frame_rate.num ? - ist->par->video_delay * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        if (pkt->pts != AV_NOPTS_VALUE) {
            ds->first_dts =
            ds->dts += av_rescale_q(pkt->pts, pkt->time_base, AV_TIME_BASE_Q);
        }
        ds->saw_first_ts = 1;
    }

    if (ds->next_dts == AV_NOPTS_VALUE)
        ds->next_dts = ds->dts;

    // an actual dts in the packet overrides any prediction
    if (pkt->dts != AV_NOPTS_VALUE)
        ds->next_dts = ds->dts = av_rescale_q(pkt->dts, pkt->time_base, AV_TIME_BASE_Q);

    ds->dts = ds->next_dts;
    // predict the next packet's dts from this packet's duration
    switch (par->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        av_assert1(pkt->duration >= 0);
        if (par->sample_rate) {
            // derive duration from the codec frame size when available
            ds->next_dts += ((int64_t)AV_TIME_BASE * par->frame_size) /
                            par->sample_rate;
        } else {
            ds->next_dts += av_rescale_q(pkt->duration, pkt->time_base, AV_TIME_BASE_Q);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ist->framerate.num) {
            // user-forced frame rate: snap the prediction to its frame grid
            // TODO: Remove work-around for c99-to-c89 issue 7
            AVRational time_base_q = AV_TIME_BASE_Q;
            int64_t next_dts = av_rescale_q(ds->next_dts, time_base_q, av_inv_q(ist->framerate));
            ds->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
        } else if (pkt->duration) {
            ds->next_dts += av_rescale_q(pkt->duration, pkt->time_base, AV_TIME_BASE_Q);
        } else if (ist->par->framerate.num != 0) {
            // no duration in the packet: derive one from the stream frame
            // rate, counting repeated fields when the parser reports them
            AVRational field_rate = av_mul_q(ist->par->framerate,
                                             (AVRational){ 2, 1 });
            int fields = 2;

            if (ds->codec_desc &&
                (ds->codec_desc->props & AV_CODEC_PROP_FIELDS) &&
                av_stream_get_parser(ist->st))
                fields = 1 + av_stream_get_parser(ist->st)->repeat_pict;

            ds->next_dts += av_rescale_q(fields, av_inv_q(field_rate), AV_TIME_BASE_Q);
        }
        break;
    }

    fd->dts_est = ds->dts;

    return 0;
}
371
/* Apply all timestamp fixups to a freshly demuxed packet, in order:
 * wrap-around correction, the file's timestamp offset, the user-specified
 * timestamp scale, the -stream_loop duration offset, discontinuity
 * processing, and finally the per-stream dts estimation. */
static int ts_fixup(Demuxer *d, AVPacket *pkt, FrameData *fd)
{
    InputFile *ifile = &d->f;
    InputStream *ist = ifile->streams[pkt->stream_index];
    DemuxStream *ds = ds_from_ist(ist);
    const int64_t start_time = ifile->start_time_effective;
    int64_t duration;
    int ret;

    pkt->time_base = ist->st->time_base;

#define SHOW_TS_DEBUG(tag_)                                             \
    if (debug_ts) {                                                     \
        av_log(ist, AV_LOG_INFO, "%s -> ist_index:%d:%d type:%s "       \
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s\n", \
               tag_, ifile->index, pkt->stream_index,                   \
               av_get_media_type_string(ist->st->codecpar->codec_type), \
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base), \
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &pkt->time_base), \
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &pkt->time_base)); \
    }

    SHOW_TS_DEBUG("demuxer");

    // undo timestamp wrap-around: timestamps far past the start time are
    // assumed to have wrapped and are shifted down by one wrap period;
    // the correction is retried until neither pts nor dts needs shifting
    if (!ds->wrap_correction_done && start_time != AV_NOPTS_VALUE &&
        ist->st->pts_wrap_bits < 64) {
        int64_t stime, stime2;

        stime = av_rescale_q(start_time, AV_TIME_BASE_Q, pkt->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ds->wrap_correction_done = 1;

        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ds->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ds->wrap_correction_done = 0;
        }
    }

    // apply the file-level timestamp offset
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, pkt->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, pkt->time_base);

    // apply the user-specified timestamp scale
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ds->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ds->ts_scale;

    // shift timestamps by the accumulated duration of previous loop
    // iterations (zero when not looping)
    duration = av_rescale_q(d->duration.ts, d->duration.tb, pkt->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        // audio decoders take precedence for estimating total file duration
        int64_t pkt_duration = d->have_audio_dec ? 0 : pkt->duration;

        pkt->pts += duration;

        // update max/min pts that will be used to compute total file duration
        // when using -stream_loop
        if (d->max_pts.ts == AV_NOPTS_VALUE ||
            av_compare_ts(d->max_pts.ts, d->max_pts.tb,
                          pkt->pts + pkt_duration, pkt->time_base) < 0) {
            d->max_pts = (Timestamp){ .ts = pkt->pts + pkt_duration,
                                      .tb = pkt->time_base };
        }
        if (d->min_pts.ts == AV_NOPTS_VALUE ||
            av_compare_ts(d->min_pts.ts, d->min_pts.tb,
                          pkt->pts, pkt->time_base) > 0) {
            d->min_pts = (Timestamp){ .ts = pkt->pts,
                                      .tb = pkt->time_base };
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    SHOW_TS_DEBUG("demuxer+tsfixup");

    // detect and try to correct for timestamp discontinuities
    ts_discontinuity_process(d, ist, pkt);

    // update estimated/predicted dts
    ret = ist_dts_update(ds, pkt, fd);
    if (ret < 0)
        return ret;

    return 0;
}
462
/* Post-process a demuxed packet before it is sent to consumers: attach
 * FrameData, fix up timestamps, signal streamcopy EOF when the recording
 * time limit is reached, and update per-stream statistics.
 * On success *send_flags may gain DEMUX_SEND_STREAMCOPY_EOF. */
static int input_packet_process(Demuxer *d, AVPacket *pkt, unsigned *send_flags)
{
    InputFile *f = &d->f;
    InputStream *ist = f->streams[pkt->stream_index];
    DemuxStream *ds = ds_from_ist(ist);
    FrameData *fd;
    int ret = 0;

    // attach (or retrieve) the side-data blob carrying ffmpeg-CLI metadata
    fd = packet_data(pkt);
    if (!fd)
        return AVERROR(ENOMEM);

    ret = ts_fixup(d, pkt, fd);
    if (ret < 0)
        return ret;

    // signal streamcopy EOF once the stream's dts passes the -t limit
    if (d->recording_time != INT64_MAX) {
        int64_t start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->start_time_effective;
        }
        if (ds->dts >= d->recording_time + start_time)
            *send_flags |= DEMUX_SEND_STREAMCOPY_EOF;
    }

    // statistics for demux_final_stats()
    ds->data_size += pkt->size;
    ds->nb_packets++;

    fd->wallclock[LATENCY_PROBE_DEMUX] = av_gettime_relative();

    if (debug_ts) {
        av_log(ist, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               f->index, pkt->stream_index,
               av_get_media_type_string(ist->par->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &pkt->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &pkt->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &pkt->time_base),
               av_ts2str(f->ts_offset), av_ts2timestr(f->ts_offset, &AV_TIME_BASE_Q));
    }

    return 0;
}
506
/* Throttle demuxing according to -readrate: for each active stream,
 * compare its current dts against the timestamp budget allowed by the
 * elapsed wallclock time (plus an initial burst) and sleep when reading
 * runs ahead. After a stall is detected, temporarily allow the faster
 * readrate_catchup rate until the lag is cleared. */
static void readrate_sleep(Demuxer *d)
{
    InputFile *f = &d->f;
    // with -copy_ts the budget starts from the file's start time
    int64_t file_start = copy_ts * (
                          (f->start_time_effective != AV_NOPTS_VALUE ? f->start_time_effective * !start_at_zero : 0) +
                          (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
                         );
    int64_t initial_burst = AV_TIME_BASE * d->readrate_initial_burst;
    int resume_warn = 0;

    for (int i = 0; i < f->nb_streams; i++) {
        InputStream *ist = f->streams[i];
        DemuxStream *ds = ds_from_ist(ist);
        int64_t stream_ts_offset, pts, now, wc_elapsed, elapsed, lag, max_pts, limit_pts;

        if (ds->discard) continue;

        stream_ts_offset = FFMAX(ds->first_dts != AV_NOPTS_VALUE ? ds->first_dts : 0, file_start);
        pts = av_rescale(ds->dts, 1000000, AV_TIME_BASE);
        now = av_gettime_relative();
        wc_elapsed = now - d->wallclock_start;
        // timestamp budget at the nominal readrate
        max_pts = stream_ts_offset + initial_burst + wc_elapsed * d->readrate;
        lag = FFMAX(max_pts - pts, 0);
        // a new stall, or an existing one that grew by > 0.3s: record the
        // resume point so catchup can be measured from here
        if ( (!ds->lag && lag > 0.3 * AV_TIME_BASE) || ( lag > ds->lag + 0.3 * AV_TIME_BASE) ) {
            ds->lag = lag;
            ds->resume_wc = now;
            ds->resume_pts = pts;
            av_log_once(ds, AV_LOG_WARNING, AV_LOG_DEBUG, &resume_warn,
                        "Resumed reading at pts %0.3f with rate %0.3f after a lag of %0.3fs\n",
                        (float)pts/AV_TIME_BASE, d->readrate_catchup, (float)lag/AV_TIME_BASE);
        }
        // lag fully recovered: back to the nominal rate
        if (ds->lag && !lag)
            ds->lag = ds->resume_wc = ds->resume_pts = 0;
        if (ds->resume_wc) {
            // catching up: budget grows at readrate_catchup from the resume point
            elapsed = now - ds->resume_wc;
            limit_pts = ds->resume_pts + elapsed * d->readrate_catchup;
        } else {
            elapsed = wc_elapsed;
            limit_pts = max_pts;
        }

        // reading is ahead of the budget: sleep off the difference
        if (pts > limit_pts)
            av_usleep(pts - limit_pts);
    }
}
552
553 static int do_send(Demuxer *d, DemuxStream *ds, AVPacket *pkt, unsigned flags,
554 const char *pkt_desc)
555 {
556 int ret;
557
558 pkt->stream_index = ds->sch_idx_stream;
559
560 ret = sch_demux_send(d->sch, d->f.index, pkt, flags);
561 if (ret == AVERROR_EOF) {
562 av_packet_unref(pkt);
563
564 av_log(ds, AV_LOG_VERBOSE, "All consumers of this stream are done\n");
565 ds->finished = 1;
566
567 if (++d->nb_streams_finished == d->nb_streams_used) {
568 av_log(d, AV_LOG_VERBOSE, "All consumers are done\n");
569 return AVERROR_EOF;
570 }
571 } else if (ret < 0) {
572 if (ret != AVERROR_EXIT)
573 av_log(d, AV_LOG_ERROR,
574 "Unable to send %s packet to consumers: %s\n",
575 pkt_desc, av_err2str(ret));
576 return ret;
577 }
578
579 return 0;
580 }
581
/* Send a demuxed packet to its consumers, passing it through the stream's
 * bitstream filter when one is configured. Also emits sub2video heartbeat
 * packets to all subtitle streams that need them.
 * pkt may be NULL only to flush a BSF. */
static int demux_send(Demuxer *d, DemuxThreadContext *dt, DemuxStream *ds,
                      AVPacket *pkt, unsigned flags)
{
    InputFile *f = &d->f;
    int ret;

    // pkt can be NULL only when flushing BSFs
    av_assert0(ds->bsf || pkt);

    // send heartbeat for sub2video streams
    if (d->pkt_heartbeat && pkt && pkt->pts != AV_NOPTS_VALUE) {
        for (int i = 0; i < f->nb_streams; i++) {
            DemuxStream *ds1 = ds_from_ist(f->streams[i]);

            if (ds1->finished || !ds1->have_sub2video)
                continue;

            d->pkt_heartbeat->pts = pkt->pts;
            d->pkt_heartbeat->time_base = pkt->time_base;
            d->pkt_heartbeat->opaque = (void*)(intptr_t)PKT_OPAQUE_SUB_HEARTBEAT;

            ret = do_send(d, ds1, d->pkt_heartbeat, 0, "heartbeat");
            if (ret < 0)
                return ret;
        }
    }

    if (ds->bsf) {
        if (pkt)
            av_packet_rescale_ts(pkt, pkt->time_base, ds->bsf->time_base_in);

        // NULL pkt signals flushing to the BSF
        ret = av_bsf_send_packet(ds->bsf, pkt);
        if (ret < 0) {
            if (pkt)
                av_packet_unref(pkt);
            av_log(ds, AV_LOG_ERROR, "Error submitting a packet for filtering: %s\n",
                   av_err2str(ret));
            return ret;
        }

        // drain all packets the BSF produced for this input; EAGAIN means
        // the BSF wants more input and is not an error
        while (1) {
            ret = av_bsf_receive_packet(ds->bsf, dt->pkt_bsf);
            if (ret == AVERROR(EAGAIN))
                return 0;
            else if (ret < 0) {
                if (ret != AVERROR_EOF)
                    av_log(ds, AV_LOG_ERROR,
                           "Error applying bitstream filters to a packet: %s\n",
                           av_err2str(ret));
                return ret;
            }

            dt->pkt_bsf->time_base = ds->bsf->time_base_out;

            ret = do_send(d, ds, dt->pkt_bsf, 0, "filtered");
            if (ret < 0) {
                av_packet_unref(dt->pkt_bsf);
                return ret;
            }
        }
    } else {
        ret = do_send(d, ds, pkt, flags, "demuxed");
        if (ret < 0)
            return ret;
    }

    return 0;
}
650
/* Flush all configured bitstream filters at EOF, sending any packets they
 * still hold to the consumers, then reset them (for -stream_loop). */
static int demux_bsf_flush(Demuxer *d, DemuxThreadContext *dt)
{
    InputFile *f = &d->f;
    int ret;

    for (unsigned i = 0; i < f->nb_streams; i++) {
        DemuxStream *ds = ds_from_ist(f->streams[i]);

        if (!ds->bsf)
            continue;

        ret = demux_send(d, dt, ds, NULL, 0);
        // a flush must end with AVERROR_EOF from the BSF: map EOF to
        // success, keep real errors, and flag a plain 0 as a bug
        ret = (ret == AVERROR_EOF) ? 0 : (ret < 0) ? ret : AVERROR_BUG;
        if (ret < 0) {
            av_log(ds, AV_LOG_ERROR, "Error flushing BSFs: %s\n",
                   av_err2str(ret));
            return ret;
        }

        av_bsf_flush(ds->bsf);
    }

    return 0;
}
675
676 static void discard_unused_programs(InputFile *ifile)
677 {
678 for (int j = 0; j < ifile->ctx->nb_programs; j++) {
679 AVProgram *p = ifile->ctx->programs[j];
680 int discard = AVDISCARD_ALL;
681
682 for (int k = 0; k < p->nb_stream_indexes; k++) {
683 DemuxStream *ds = ds_from_ist(ifile->streams[p->stream_index[k]]);
684
685 if (!ds->discard) {
686 discard = AVDISCARD_DEFAULT;
687 break;
688 }
689 }
690 p->discard = discard;
691 }
692 }
693
694 static void thread_set_name(InputFile *f)
695 {
696 char name[16];
697 snprintf(name, sizeof(name), "dmx%d:%s", f->index, f->ctx->iformat->name);
698 ff_thread_setname(name);
699 }
700
701 static void demux_thread_uninit(DemuxThreadContext *dt)
702 {
703 av_packet_free(&dt->pkt_demux);
704 av_packet_free(&dt->pkt_bsf);
705
706 memset(dt, 0, sizeof(*dt));
707 }
708
709 static int demux_thread_init(DemuxThreadContext *dt)
710 {
711 memset(dt, 0, sizeof(*dt));
712
713 dt->pkt_demux = av_packet_alloc();
714 if (!dt->pkt_demux)
715 return AVERROR(ENOMEM);
716
717 dt->pkt_bsf = av_packet_alloc();
718 if (!dt->pkt_bsf)
719 return AVERROR(ENOMEM);
720
721 return 0;
722 }
723
/* Main loop of the demuxer thread: read packets from the input, process
 * their timestamps, throttle to -readrate, and hand them to the scheduler.
 * Handles EAGAIN, corrupt packets, -stream_loop and BSF flushing at EOF.
 * Returns 0 on normal termination, a negative error code otherwise. */
static int input_thread(void *arg)
{
    Demuxer *d = arg;
    InputFile *f = &d->f;

    DemuxThreadContext dt;

    int ret = 0;

    ret = demux_thread_init(&dt);
    if (ret < 0)
        goto finish;

    thread_set_name(f);

    discard_unused_programs(f);

    d->read_started = 1;
    d->wallclock_start = av_gettime_relative();

    while (1) {
        DemuxStream *ds;
        unsigned send_flags = 0;

        ret = av_read_frame(f->ctx, dt.pkt_demux);

        if (ret == AVERROR(EAGAIN)) {
            // no data available right now; back off briefly and retry
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            // EOF or read error: flush BSFs, then either loop or terminate
            int ret_bsf;

            if (ret == AVERROR_EOF)
                av_log(d, AV_LOG_VERBOSE, "EOF while reading input\n");
            else {
                av_log(d, AV_LOG_ERROR, "Error during demuxing: %s\n",
                       av_err2str(ret));
                ret = exit_on_error ? ret : 0;
            }

            ret_bsf = demux_bsf_flush(d, &dt);
            ret = err_merge(ret == AVERROR_EOF ? 0 : ret, ret_bsf);

            if (d->loop) {
                /* signal looping to our consumers */
                dt.pkt_demux->stream_index = -1;
                ret = sch_demux_send(d->sch, f->index, dt.pkt_demux, 0);
                if (ret >= 0)
                    ret = seek_to_start(d, (Timestamp){ .ts = dt.pkt_demux->pts,
                                                        .tb = dt.pkt_demux->time_base });
                if (ret >= 0)
                    continue;

                /* fallthrough to the error path */
            }

            break;
        }

        if (do_pkt_dump) {
            av_pkt_dump_log2(NULL, AV_LOG_INFO, dt.pkt_demux, do_hex_dump,
                             f->ctx->streams[dt.pkt_demux->stream_index]);
        }

        /* the following test is needed in case new streams appear
           dynamically in stream : we ignore them */
        ds = dt.pkt_demux->stream_index < f->nb_streams ?
             ds_from_ist(f->streams[dt.pkt_demux->stream_index]) : NULL;
        if (!ds || ds->discard || ds->finished) {
            // report_new_stream() warns only for indices not yet seen
            report_new_stream(d, dt.pkt_demux);
            av_packet_unref(dt.pkt_demux);
            continue;
        }

        if (dt.pkt_demux->flags & AV_PKT_FLAG_CORRUPT) {
            av_log(d, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
                   "corrupt input packet in stream %d\n",
                   dt.pkt_demux->stream_index);
            if (exit_on_error) {
                av_packet_unref(dt.pkt_demux);
                ret = AVERROR_INVALIDDATA;
                break;
            }
        }

        ret = input_packet_process(d, dt.pkt_demux, &send_flags);
        if (ret < 0)
            break;

        if (d->readrate)
            readrate_sleep(d);

        ret = demux_send(d, &dt, ds, dt.pkt_demux, send_flags);
        if (ret < 0)
            break;
    }

    // EOF/EXIT is normal termination
    if (ret == AVERROR_EOF || ret == AVERROR_EXIT)
        ret = 0;

finish:
    demux_thread_uninit(&dt);

    return ret;
}
831
/* Log per-stream and total demuxing statistics (packets/bytes read, and
 * decoding counters where applicable) at verbose level. */
static void demux_final_stats(Demuxer *d)
{
    InputFile *f = &d->f;
    uint64_t total_packets = 0, total_size = 0;

    av_log(f, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
           f->index, f->ctx->url);

    for (int j = 0; j < f->nb_streams; j++) {
        InputStream *ist = f->streams[j];
        DemuxStream *ds = ds_from_ist(ist);
        enum AVMediaType type = ist->par->codec_type;

        // unused streams and attachments carry no meaningful stats
        if (ds->discard || type == AVMEDIA_TYPE_ATTACHMENT)
            continue;

        total_size += ds->data_size;
        total_packets += ds->nb_packets;

        av_log(f, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
               f->index, j, av_get_media_type_string(type));
        av_log(f, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
               ds->nb_packets, ds->data_size);

        if (ds->decoding_needed) {
            av_log(f, AV_LOG_VERBOSE,
                   "%"PRIu64" frames decoded; %"PRIu64" decode errors",
                   ist->decoder->frames_decoded, ist->decoder->decode_errors);
            if (type == AVMEDIA_TYPE_AUDIO)
                av_log(f, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->decoder->samples_decoded);
            av_log(f, AV_LOG_VERBOSE, "; ");
        }

        av_log(f, AV_LOG_VERBOSE, "\n");
    }

    av_log(f, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
           total_packets, total_size);
}
871
/* Free an input stream and everything it owns; *pist is set to NULL.
 * A NULL *pist is a no-op. */
static void ist_free(InputStream **pist)
{
    InputStream *ist = *pist;
    DemuxStream *ds;

    if (!ist)
        return;
    ds = ds_from_ist(ist);

    dec_free(&ist->decoder);

    av_dict_free(&ds->decoder_opts);
    av_freep(&ist->filters);
    av_freep(&ds->dec_opts.hwaccel_device);

    avcodec_parameters_free(&ist->par);

    av_frame_free(&ds->decoded_params);

    av_bsf_free(&ds->bsf);

    av_freep(pist);
}
895
896 static void istg_free(InputStreamGroup **pistg)
897 {
898 InputStreamGroup *istg = *pistg;
899
900 if (!istg)
901 return;
902
903 av_freep(pistg);
904 }
905
/* Close an input file: log final stats (if reading started), free all
 * streams and stream groups, close the demuxer and free the Demuxer
 * itself; *pf is set to NULL. A NULL *pf is a no-op. */
void ifile_close(InputFile **pf)
{
    InputFile *f = *pf;
    // demuxer_from_ifile() is a plain cast, so this is safe even for NULL f
    Demuxer *d = demuxer_from_ifile(f);

    if (!f)
        return;

    if (d->read_started)
        demux_final_stats(d);

    for (int i = 0; i < f->nb_streams; i++)
        ist_free(&f->streams[i]);
    av_freep(&f->streams);

    for (int i = 0; i < f->nb_stream_groups; i++)
        istg_free(&f->stream_groups[i]);
    av_freep(&f->stream_groups);

    avformat_close_input(&f->ctx);

    av_packet_free(&d->pkt_heartbeat);

    av_freep(pf);
}
931
/* Register a consumer for an input stream: make sure the stream is added
 * to the scheduler, set up its decoder on first decoding use, and return
 * in *src the scheduler node the consumer should connect to (decoder
 * output for decoding, demuxer stream output for streamcopy).
 * decoding_needed is 0 for streamcopy or a DECODING_FOR_* flag.
 * Returns 0 on success, a negative error code on failure. */
int ist_use(InputStream *ist, int decoding_needed,
            const ViewSpecifier *vs, SchedulerNode *src)
{
    Demuxer *d = demuxer_from_ifile(ist->file);
    DemuxStream *ds = ds_from_ist(ist);
    int ret;

    if (ist->user_set_discard == AVDISCARD_ALL) {
        av_log(ist, AV_LOG_ERROR, "Cannot %s a disabled input stream\n",
               decoding_needed ? "decode" : "streamcopy");
        return AVERROR(EINVAL);
    }

    if (decoding_needed && !ist->dec) {
        av_log(ist, AV_LOG_ERROR,
               "Decoding requested, but no decoder found for: %s\n",
               avcodec_get_name(ist->par->codec_id));
        return AVERROR(EINVAL);
    }

    // register the stream with the scheduler on first use
    if (ds->sch_idx_stream < 0) {
        ret = sch_add_demux_stream(d->sch, d->f.index);
        if (ret < 0)
            return ret;
        ds->sch_idx_stream = ret;
    }

    if (ds->discard) {
        ds->discard = 0;
        d->nb_streams_used++;
    }

    ist->st->discard = ist->user_set_discard;
    ds->decoding_needed |= decoding_needed;
    ds->streamcopy_needed |= !decoding_needed;

    // set up the decoder on the first decoding use of this stream
    if (decoding_needed && ds->sch_idx_dec < 0) {
        int is_audio = ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
        int is_unreliable = !!(d->f.ctx->iformat->flags & AVFMT_NOTIMESTAMPS);
        int64_t use_wallclock_as_timestamps;

        ret = av_opt_get_int(d->f.ctx, "use_wallclock_as_timestamps", 0, &use_wallclock_as_timestamps);
        if (ret < 0)
            return ret;

        // wallclock timestamps are usable even for timestamp-less formats
        if (use_wallclock_as_timestamps)
            is_unreliable = 0;

        ds->dec_opts.flags |= (!!ist->fix_sub_duration * DECODER_FLAG_FIX_SUB_DURATION) |
                              (!!is_unreliable * DECODER_FLAG_TS_UNRELIABLE) |
                              (!!(d->loop && is_audio) * DECODER_FLAG_SEND_END_TS)
#if FFMPEG_OPT_TOP
                              | ((ist->top_field_first >= 0) * DECODER_FLAG_TOP_FIELD_FIRST)
#endif
                              ;

        if (ist->framerate.num) {
            ds->dec_opts.flags |= DECODER_FLAG_FRAMERATE_FORCED;
            ds->dec_opts.framerate = ist->framerate;
        } else
            ds->dec_opts.framerate = ist->st->avg_frame_rate;

        if (ist->dec->id == AV_CODEC_ID_DVB_SUBTITLE &&
            (ds->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ds->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ds->decoding_needed & DECODING_FOR_FILTER)
                av_log(ist, AV_LOG_WARNING,
                       "Warning using DVB subtitles for filtering and output at the "
                       "same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        snprintf(ds->dec_name, sizeof(ds->dec_name), "%d:%d", ist->file->index, ist->index);
        ds->dec_opts.name = ds->dec_name;

        ds->dec_opts.codec = ist->dec;
        ds->dec_opts.par = ist->par;

        ds->dec_opts.log_parent = ist;

        ds->decoded_params = av_frame_alloc();
        if (!ds->decoded_params)
            return AVERROR(ENOMEM);

        ret = dec_init(&ist->decoder, d->sch,
                       &ds->decoder_opts, &ds->dec_opts, ds->decoded_params);
        if (ret < 0)
            return ret;
        ds->sch_idx_dec = ret;

        // wire demuxer stream output -> decoder input in the scheduler
        ret = sch_connect(d->sch, SCH_DSTREAM(d->f.index, ds->sch_idx_stream),
                                  SCH_DEC_IN(ds->sch_idx_dec));
        if (ret < 0)
            return ret;

        d->have_audio_dec |= is_audio;
    }

    // pick the scheduler node the consumer should read from
    if (decoding_needed && ist->par->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = dec_request_view(ist->decoder, vs, src);
        if (ret < 0)
            return ret;
    } else {
        *src = decoding_needed ?
               SCH_DEC_OUT(ds->sch_idx_dec, 0) :
               SCH_DSTREAM(d->f.index, ds->sch_idx_stream);
    }

    return 0;
}
1041
/*
 * Connect an input filter to this input stream.
 *
 * Marks the stream as needing decoding (for an output stream directly when
 * is_simple, otherwise for a complex filtergraph) via ist_use(), appends
 * ifilter to ist->filters, and fills *opts with the parameters the filter
 * needs: framerate/cropping for video, canvas size for subtitles, fallback
 * stream parameters, trim times and flags.
 *
 * On success *src holds the scheduler node the filter should read from.
 * Returns 0 on success, a negative error code on failure.
 */
int ist_filter_add(InputStream *ist, InputFilter *ifilter, int is_simple,
                   const ViewSpecifier *vs, InputFilterOptions *opts,
                   SchedulerNode *src)
{
    Demuxer *d = demuxer_from_ifile(ist->file);
    DemuxStream *ds = ds_from_ist(ist);
    int64_t tsoffset = 0;
    int ret;

    ret = ist_use(ist, is_simple ? DECODING_FOR_OST : DECODING_FOR_FILTER,
                  vs, src);
    if (ret < 0)
        return ret;

    ret = GROW_ARRAY(ist->filters, ist->nb_filters);
    if (ret < 0)
        return ret;

    ist->filters[ist->nb_filters - 1] = ifilter;

    if (ist->par->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* container-level cropping is exported as packet side data */
        const AVPacketSideData *sd = av_packet_side_data_get(ist->par->coded_side_data,
                                                             ist->par->nb_coded_side_data,
                                                             AV_PKT_DATA_FRAME_CROPPING);
        if (ist->framerate.num > 0 && ist->framerate.den > 0) {
            /* user forced a framerate with -r => treat input as CFR */
            opts->framerate = ist->framerate;
            opts->flags |= IFILTER_FLAG_CFR;
        } else
            opts->framerate = av_guess_frame_rate(d->f.ctx, ist->st, NULL);
        if (sd && sd->size >= sizeof(uint32_t) * 4) {
            /* side data layout: four little-endian 32-bit crop amounts */
            opts->crop_top    = AV_RL32(sd->data +  0);
            opts->crop_bottom = AV_RL32(sd->data +  4);
            opts->crop_left   = AV_RL32(sd->data +  8);
            opts->crop_right  = AV_RL32(sd->data + 12);
            /* only ask the filter to crop when container cropping is enabled
             * and at least one crop amount is nonzero */
            if (ds->apply_cropping && ds->apply_cropping != CROP_CODEC &&
                (opts->crop_top | opts->crop_bottom | opts->crop_left | opts->crop_right))
                opts->flags |= IFILTER_FLAG_CROP;
        }
    } else if (ist->par->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        /* Compute the size of the canvas for the subtitles stream.
           If the subtitles codecpar has set a size, use it. Otherwise use the
           maximum dimensions of the video streams in the same file. */
        opts->sub2video_width  = ist->par->width;
        opts->sub2video_height = ist->par->height;
        if (!(opts->sub2video_width && opts->sub2video_height)) {
            for (int j = 0; j < d->f.nb_streams; j++) {
                AVCodecParameters *par1 = d->f.streams[j]->par;
                if (par1->codec_type == AVMEDIA_TYPE_VIDEO) {
                    opts->sub2video_width  = FFMAX(opts->sub2video_width,  par1->width);
                    opts->sub2video_height = FFMAX(opts->sub2video_height, par1->height);
                }
            }
        }

        /* last resort: fall back to a PAL-sized canvas */
        if (!(opts->sub2video_width && opts->sub2video_height)) {
            opts->sub2video_width  = FFMAX(opts->sub2video_width,  720);
            opts->sub2video_height = FFMAX(opts->sub2video_height, 576);
        }

        /* heartbeat packets keep sub2video frames flowing between subtitles */
        if (!d->pkt_heartbeat) {
            d->pkt_heartbeat = av_packet_alloc();
            if (!d->pkt_heartbeat)
                return AVERROR(ENOMEM);
        }
        ds->have_sub2video = 1;
    }

    /* give the filter fallback parameters to use before the first frame */
    ret = av_frame_copy_props(opts->fallback, ds->decoded_params);
    if (ret < 0)
        return ret;
    opts->fallback->format = ds->decoded_params->format;
    opts->fallback->width  = ds->decoded_params->width;
    opts->fallback->height = ds->decoded_params->height;

    ret = av_channel_layout_copy(&opts->fallback->ch_layout, &ds->decoded_params->ch_layout);
    if (ret < 0)
        return ret;

    if (copy_ts) {
        tsoffset = d->f.start_time == AV_NOPTS_VALUE ? 0 : d->f.start_time;
        if (!start_at_zero && d->f.ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += d->f.ctx->start_time;
    }
    /* accurate seeking trims frames before the seek point in the filter */
    opts->trim_start_us = ((d->f.start_time == AV_NOPTS_VALUE) || !d->accurate_seek) ?
                          AV_NOPTS_VALUE : tsoffset;
    opts->trim_end_us   = d->recording_time;

    opts->name = av_strdup(ds->dec_name);
    if (!opts->name)
        return AVERROR(ENOMEM);

    opts->flags |= IFILTER_FLAG_AUTOROTATE * !!(ds->autorotate) |
                   IFILTER_FLAG_REINIT     * !!(ds->reinit_filters) |
                   IFILTER_FLAG_DROPCHANGED* !!(ds->drop_changed);

    return 0;
}
1139
1140 static int choose_decoder(const OptionsContext *o, void *logctx,
1141 AVFormatContext *s, AVStream *st,
1142 enum HWAccelID hwaccel_id, enum AVHWDeviceType hwaccel_device_type,
1143 const AVCodec **pcodec)
1144
1145 {
1146 const char *codec_name = NULL;
1147
1148 opt_match_per_stream_str(logctx, &o->codec_names, s, st, &codec_name);
1149 if (codec_name) {
1150 int ret = find_codec(NULL, codec_name, st->codecpar->codec_type, 0, pcodec);
1151 if (ret < 0)
1152 return ret;
1153 st->codecpar->codec_id = (*pcodec)->id;
1154 if (recast_media && st->codecpar->codec_type != (*pcodec)->type)
1155 st->codecpar->codec_type = (*pcodec)->type;
1156 return 0;
1157 } else {
1158 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
1159 hwaccel_id == HWACCEL_GENERIC &&
1160 hwaccel_device_type != AV_HWDEVICE_TYPE_NONE) {
1161 const AVCodec *c;
1162 void *i = NULL;
1163
1164 while ((c = av_codec_iterate(&i))) {
1165 const AVCodecHWConfig *config;
1166
1167 if (c->id != st->codecpar->codec_id ||
1168 !av_codec_is_decoder(c))
1169 continue;
1170
1171 for (int j = 0; config = avcodec_get_hw_config(c, j); j++) {
1172 if (config->device_type == hwaccel_device_type) {
1173 av_log(logctx, AV_LOG_VERBOSE, "Selecting decoder '%s' because of requested hwaccel method %s\n",
1174 c->name, av_hwdevice_get_type_name(hwaccel_device_type));
1175 *pcodec = c;
1176 return 0;
1177 }
1178 }
1179 }
1180 }
1181
1182 *pcodec = avcodec_find_decoder(st->codecpar->codec_id);
1183 return 0;
1184 }
1185 }
1186
1187 static int guess_input_channel_layout(InputStream *ist, AVCodecParameters *par,
1188 int guess_layout_max)
1189 {
1190 if (par->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC) {
1191 char layout_name[256];
1192
1193 if (par->ch_layout.nb_channels > guess_layout_max)
1194 return 0;
1195 av_channel_layout_default(&par->ch_layout, par->ch_layout.nb_channels);
1196 if (par->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
1197 return 0;
1198 av_channel_layout_describe(&par->ch_layout, layout_name, sizeof(layout_name));
1199 av_log(ist, AV_LOG_WARNING, "Guessed Channel Layout: %s\n", layout_name);
1200 }
1201 return 1;
1202 }
1203
1204 static int add_display_matrix_to_stream(const OptionsContext *o,
1205 AVFormatContext *ctx, InputStream *ist)
1206 {
1207 AVStream *st = ist->st;
1208 DemuxStream *ds = ds_from_ist(ist);
1209 AVPacketSideData *sd;
1210 double rotation = DBL_MAX;
1211 int hflip = -1, vflip = -1;
1212 int hflip_set = 0, vflip_set = 0, rotation_set = 0;
1213 int32_t *buf;
1214
1215 opt_match_per_stream_dbl(ist, &o->display_rotations, ctx, st, &rotation);
1216 opt_match_per_stream_int(ist, &o->display_hflips, ctx, st, &hflip);
1217 opt_match_per_stream_int(ist, &o->display_vflips, ctx, st, &vflip);
1218
1219 rotation_set = rotation != DBL_MAX;
1220 hflip_set = hflip != -1;
1221 vflip_set = vflip != -1;
1222
1223 if (!rotation_set && !hflip_set && !vflip_set)
1224 return 0;
1225
1226 sd = av_packet_side_data_new(&st->codecpar->coded_side_data,
1227 &st->codecpar->nb_coded_side_data,
1228 AV_PKT_DATA_DISPLAYMATRIX,
1229 sizeof(int32_t) * 9, 0);
1230 if (!sd) {
1231 av_log(ist, AV_LOG_FATAL, "Failed to generate a display matrix!\n");
1232 return AVERROR(ENOMEM);
1233 }
1234
1235 buf = (int32_t *)sd->data;
1236 av_display_rotation_set(buf,
1237 rotation_set ? -(rotation) : -0.0f);
1238
1239 av_display_matrix_flip(buf,
1240 hflip_set ? hflip : 0,
1241 vflip_set ? vflip : 0);
1242
1243 ds->force_display_matrix = 1;
1244
1245 return 0;
1246 }
1247
1248 static const char *input_stream_item_name(void *obj)
1249 {
1250 const DemuxStream *ds = obj;
1251
1252 return ds->log_name;
1253 }
1254
/* AVClass installed as DemuxStream.ist.class so av_log() on an InputStream
 * prints its per-stream log_name (see input_stream_item_name). */
static const AVClass input_stream_class = {
    .class_name = "InputStream",
    .version = LIBAVUTIL_VERSION_INT,
    .item_name = input_stream_item_name,
    .category = AV_CLASS_CATEGORY_DEMUXER,
};
1261
1262 static DemuxStream *demux_stream_alloc(Demuxer *d, AVStream *st)
1263 {
1264 const char *type_str = av_get_media_type_string(st->codecpar->codec_type);
1265 InputFile *f = &d->f;
1266 DemuxStream *ds;
1267
1268 ds = allocate_array_elem(&f->streams, sizeof(*ds), &f->nb_streams);
1269 if (!ds)
1270 return NULL;
1271
1272 ds->sch_idx_stream = -1;
1273 ds->sch_idx_dec = -1;
1274
1275 ds->ist.st = st;
1276 ds->ist.file = f;
1277 ds->ist.index = st->index;
1278 ds->ist.class = &input_stream_class;
1279
1280 snprintf(ds->log_name, sizeof(ds->log_name), "%cist#%d:%d/%s",
1281 type_str ? *type_str : '?', d->f.index, st->index,
1282 avcodec_get_name(st->codecpar->codec_id));
1283
1284 return ds;
1285 }
1286
/*
 * Create and configure an InputStream (as a DemuxStream) for AVStream st
 * of demuxer d, applying all per-stream user options: ts scaling,
 * autorotation, cropping policy, forced codec tag, display matrix,
 * hwaccel selection, decoder choice and decoder options, discard state,
 * framerate / channel layout / canvas size per media type, and an
 * optional bitstream filter chain whose output parameters become the
 * stream's effective parameters.
 *
 * Options consumed from o->g->codec_opts are recorded in *opts_used.
 * Returns 0 on success, a negative error code on failure.
 */
static int ist_add(const OptionsContext *o, Demuxer *d, AVStream *st, AVDictionary **opts_used)
{
    AVFormatContext *ic = d->f.ctx;
    AVCodecParameters *par = st->codecpar;
    DemuxStream *ds;
    InputStream *ist;
    const char *framerate = NULL, *hwaccel_device = NULL;
    const char *hwaccel = NULL;
    const char *apply_cropping = NULL;
    const char *hwaccel_output_format = NULL;
    const char *codec_tag = NULL;
    const char *bsfs = NULL;
    char *next;
    const char *discard_str = NULL;
    int ret;

    ds = demux_stream_alloc(d, st);
    if (!ds)
        return AVERROR(ENOMEM);

    ist = &ds->ist;

    /* streams start out discarded; ist_use() activates them on demand */
    ds->discard = 1;
    st->discard = AVDISCARD_ALL;
    ds->first_dts = AV_NOPTS_VALUE;
    ds->next_dts = AV_NOPTS_VALUE;

    ds->dec_opts.time_base = st->time_base;

    ds->ts_scale = 1.0;
    opt_match_per_stream_dbl(ist, &o->ts_scale, ic, st, &ds->ts_scale);

    ds->autorotate = 1;
    opt_match_per_stream_int(ist, &o->autorotate, ic, st, &ds->autorotate);

    ds->apply_cropping = CROP_ALL;
    opt_match_per_stream_str(ist, &o->apply_cropping, ic, st, &apply_cropping);
    if (apply_cropping) {
        /* local AVOption table so av_opt_eval_int() can parse the named
         * values ("none"/"all"/"codec"/"container") as well as integers */
        const AVOption opts[] = {
            { "apply_cropping", NULL, 0, AV_OPT_TYPE_INT,
                { .i64 = CROP_ALL }, CROP_DISABLED, CROP_CONTAINER, AV_OPT_FLAG_DECODING_PARAM, .unit = "apply_cropping" },
            { "none", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CROP_DISABLED }, .unit = "apply_cropping" },
            { "all", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CROP_ALL }, .unit = "apply_cropping" },
            { "codec", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CROP_CODEC }, .unit = "apply_cropping" },
            { "container", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = CROP_CONTAINER }, .unit = "apply_cropping" },
            { NULL },
        };
        const AVClass class = {
            .class_name = "apply_cropping",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_int(&pclass, opts, apply_cropping, &ds->apply_cropping);
        if (ret < 0) {
            av_log(ist, AV_LOG_ERROR, "Invalid apply_cropping value '%s'.\n", apply_cropping);
            return ret;
        }
    }

    opt_match_per_stream_str(ist, &o->codec_tags, ic, st, &codec_tag);
    if (codec_tag) {
        /* numeric tag, or a fourcc string when not fully numeric */
        uint32_t tag = strtol(codec_tag, &next, 0);
        if (*next) {
            uint8_t buf[4] = { 0 };
            memcpy(buf, codec_tag, FFMIN(sizeof(buf), strlen(codec_tag)));
            tag = AV_RL32(buf);
        }

        st->codecpar->codec_tag = tag;
    }

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        ret = add_display_matrix_to_stream(o, ic, ist);
        if (ret < 0)
            return ret;

        opt_match_per_stream_str(ist, &o->hwaccels, ic, st, &hwaccel);
        opt_match_per_stream_str(ist, &o->hwaccel_output_formats, ic, st,
                                 &hwaccel_output_format);
        if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "cuvid")) {
            av_log(ist, AV_LOG_WARNING,
                "WARNING: defaulting hwaccel_output_format to cuda for compatibility "
                "with old commandlines. This behaviour is DEPRECATED and will be removed "
                "in the future. Please explicitly set \"-hwaccel_output_format cuda\".\n");
            ds->dec_opts.hwaccel_output_format = AV_PIX_FMT_CUDA;
        } else if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "qsv")) {
            av_log(ist, AV_LOG_WARNING,
                "WARNING: defaulting hwaccel_output_format to qsv for compatibility "
                "with old commandlines. This behaviour is DEPRECATED and will be removed "
                "in the future. Please explicitly set \"-hwaccel_output_format qsv\".\n");
            ds->dec_opts.hwaccel_output_format = AV_PIX_FMT_QSV;
        } else if (!hwaccel_output_format && hwaccel && !strcmp(hwaccel, "mediacodec")) {
            // There is no real AVHWFrameContext implementation. Set
            // hwaccel_output_format to avoid av_hwframe_transfer_data error.
            ds->dec_opts.hwaccel_output_format = AV_PIX_FMT_MEDIACODEC;
        } else if (hwaccel_output_format) {
            ds->dec_opts.hwaccel_output_format = av_get_pix_fmt(hwaccel_output_format);
            if (ds->dec_opts.hwaccel_output_format == AV_PIX_FMT_NONE) {
                av_log(ist, AV_LOG_FATAL, "Unrecognised hwaccel output "
                       "format: %s", hwaccel_output_format);
            }
        } else {
            ds->dec_opts.hwaccel_output_format = AV_PIX_FMT_NONE;
        }

        if (hwaccel) {
            // The NVDEC hwaccels use a CUDA device, so remap the name here.
            if (!strcmp(hwaccel, "nvdec") || !strcmp(hwaccel, "cuvid"))
                hwaccel = "cuda";

            if (!strcmp(hwaccel, "none"))
                ds->dec_opts.hwaccel_id = HWACCEL_NONE;
            else if (!strcmp(hwaccel, "auto"))
                ds->dec_opts.hwaccel_id = HWACCEL_AUTO;
            else {
                enum AVHWDeviceType type = av_hwdevice_find_type_by_name(hwaccel);
                if (type != AV_HWDEVICE_TYPE_NONE) {
                    ds->dec_opts.hwaccel_id = HWACCEL_GENERIC;
                    ds->dec_opts.hwaccel_device_type = type;
                }

                if (!ds->dec_opts.hwaccel_id) {
                    /* name matched no device type: list the valid ones */
                    av_log(ist, AV_LOG_FATAL, "Unrecognized hwaccel: %s.\n",
                           hwaccel);
                    av_log(ist, AV_LOG_FATAL, "Supported hwaccels: ");
                    type = AV_HWDEVICE_TYPE_NONE;
                    while ((type = av_hwdevice_iterate_types(type)) !=
                           AV_HWDEVICE_TYPE_NONE)
                        av_log(ist, AV_LOG_FATAL, "%s ",
                               av_hwdevice_get_type_name(type));
                    av_log(ist, AV_LOG_FATAL, "\n");
                    return AVERROR(EINVAL);
                }
            }
        }

        opt_match_per_stream_str(ist, &o->hwaccel_devices, ic, st, &hwaccel_device);
        if (hwaccel_device) {
            ds->dec_opts.hwaccel_device = av_strdup(hwaccel_device);
            if (!ds->dec_opts.hwaccel_device)
                return AVERROR(ENOMEM);
        }
    }

    ret = choose_decoder(o, ist, ic, st, ds->dec_opts.hwaccel_id,
                         ds->dec_opts.hwaccel_device_type, &ist->dec);
    if (ret < 0)
        return ret;

    if (ist->dec) {
        ret = filter_codec_opts(o->g->codec_opts, ist->st->codecpar->codec_id,
                                ic, st, ist->dec, &ds->decoder_opts, opts_used);
        if (ret < 0)
            return ret;
    }

    ds->reinit_filters = -1;
    opt_match_per_stream_int(ist, &o->reinit_filters, ic, st, &ds->reinit_filters);

    ds->drop_changed = 0;
    opt_match_per_stream_int(ist, &o->drop_changed, ic, st, &ds->drop_changed);

    /* drop_changed implies no filter reinit; an explicit request for both
     * is an error, the default (-1) is silently overridden */
    if (ds->drop_changed && ds->reinit_filters) {
        if (ds->reinit_filters > 0) {
            av_log(ist, AV_LOG_ERROR, "drop_changed and reinit_filters both enabled. These are mutually exclusive.\n");
            return AVERROR(EINVAL);
        }
        ds->reinit_filters = 0;
    }

    ist->user_set_discard = AVDISCARD_NONE;

    if ((o->video_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) ||
        (o->audio_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ||
        (o->subtitle_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) ||
        (o->data_disable && ist->st->codecpar->codec_type == AVMEDIA_TYPE_DATA))
        ist->user_set_discard = AVDISCARD_ALL;

    opt_match_per_stream_str(ist, &o->discard, ic, st, &discard_str);
    if (discard_str) {
        ret = av_opt_set(ist->st, "discard", discard_str, 0);
        if (ret < 0) {
            av_log(ist, AV_LOG_ERROR, "Error parsing discard %s.\n", discard_str);
            return ret;
        }
        ist->user_set_discard = ist->st->discard;
    }

    ds->dec_opts.flags |= DECODER_FLAG_BITEXACT * !!o->bitexact;

    av_dict_set_int(&ds->decoder_opts, "apply_cropping",
                    ds->apply_cropping && ds->apply_cropping != CROP_CONTAINER, 0);

    if (ds->force_display_matrix) {
        /* append "displaymatrix" to side_data_prefer_packet so the user
         * matrix attached above wins over any in-bitstream matrix */
        char buf[32];
        if (av_dict_get(ds->decoder_opts, "side_data_prefer_packet", NULL, 0))
            buf[0] = ',';
        else
            buf[0] = '\0';
        av_strlcat(buf, "displaymatrix", sizeof(buf));
        av_dict_set(&ds->decoder_opts, "side_data_prefer_packet", buf, AV_DICT_APPEND);
    }
    /* Attached pics are sparse, therefore we would not want to delay their decoding
     * till EOF. */
    if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
        av_dict_set(&ds->decoder_opts, "thread_type", "-frame", 0);

    switch (par->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        opt_match_per_stream_str(ist, &o->frame_rates, ic, st, &framerate);
        if (framerate) {
            ret = av_parse_video_rate(&ist->framerate, framerate);
            if (ret < 0) {
                av_log(ist, AV_LOG_ERROR, "Error parsing framerate %s.\n",
                       framerate);
                return ret;
            }
        }

#if FFMPEG_OPT_TOP
        ist->top_field_first = -1;
        opt_match_per_stream_int(ist, &o->top_field_first, ic, st, &ist->top_field_first);
#endif

        break;
    case AVMEDIA_TYPE_AUDIO: {
        const char *ch_layout_str = NULL;

        opt_match_per_stream_str(ist, &o->audio_ch_layouts, ic, st, &ch_layout_str);
        if (ch_layout_str) {
            AVChannelLayout ch_layout;
            ret = av_channel_layout_from_string(&ch_layout, ch_layout_str);
            if (ret < 0) {
                av_log(ist, AV_LOG_ERROR, "Error parsing channel layout %s.\n", ch_layout_str);
                return ret;
            }
            /* a forced layout must agree with the actual channel count */
            if (par->ch_layout.nb_channels <= 0 || par->ch_layout.nb_channels == ch_layout.nb_channels) {
                av_channel_layout_uninit(&par->ch_layout);
                par->ch_layout = ch_layout;
            } else {
                av_log(ist, AV_LOG_ERROR,
                       "Specified channel layout '%s' has %d channels, but input has %d channels.\n",
                       ch_layout_str, ch_layout.nb_channels, par->ch_layout.nb_channels);
                av_channel_layout_uninit(&ch_layout);
                return AVERROR(EINVAL);
            }
        } else {
            int guess_layout_max = INT_MAX;
            opt_match_per_stream_int(ist, &o->guess_layout_max, ic, st, &guess_layout_max);
            guess_input_channel_layout(ist, par, guess_layout_max);
        }
        break;
    }
    case AVMEDIA_TYPE_DATA:
    case AVMEDIA_TYPE_SUBTITLE: {
        const char *canvas_size = NULL;

        opt_match_per_stream_int(ist, &o->fix_sub_duration, ic, st, &ist->fix_sub_duration);
        opt_match_per_stream_str(ist, &o->canvas_sizes, ic, st, &canvas_size);
        if (canvas_size) {
            ret = av_parse_video_size(&par->width, &par->height,
                                      canvas_size);
            if (ret < 0) {
                av_log(ist, AV_LOG_FATAL, "Invalid canvas size: %s.\n", canvas_size);
                return ret;
            }
        }
        break;
    }
    case AVMEDIA_TYPE_ATTACHMENT:
    case AVMEDIA_TYPE_UNKNOWN:
        break;
    default: av_assert0(0);
    }

    /* the stream's effective parameters live in a private copy so bsf
     * output parameters can be applied without touching st->codecpar */
    ist->par = avcodec_parameters_alloc();
    if (!ist->par)
        return AVERROR(ENOMEM);

    ret = avcodec_parameters_copy(ist->par, par);
    if (ret < 0) {
        av_log(ist, AV_LOG_ERROR, "Error exporting stream parameters.\n");
        return ret;
    }

    if (ist->st->sample_aspect_ratio.num)
        ist->par->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    opt_match_per_stream_str(ist, &o->bitstream_filters, ic, st, &bsfs);
    if (bsfs) {
        ret = av_bsf_list_parse_str(bsfs, &ds->bsf);
        if (ret < 0) {
            av_log(ist, AV_LOG_ERROR,
                   "Error parsing bitstream filter sequence '%s': %s\n",
                   bsfs, av_err2str(ret));
            return ret;
        }

        ret = avcodec_parameters_copy(ds->bsf->par_in, ist->par);
        if (ret < 0)
            return ret;
        ds->bsf->time_base_in = ist->st->time_base;

        ret = av_bsf_init(ds->bsf);
        if (ret < 0) {
            av_log(ist, AV_LOG_ERROR, "Error initializing bitstream filters: %s\n",
                   av_err2str(ret));
            return ret;
        }

        ret = avcodec_parameters_copy(ist->par, ds->bsf->par_out);
        if (ret < 0)
            return ret;
    }

    ds->codec_desc = avcodec_descriptor_get(ist->par->codec_id);

    return 0;
}
1609
1610 static const char *input_stream_group_item_name(void *obj)
1611 {
1612 const DemuxStreamGroup *dsg = obj;
1613
1614 return dsg->log_name;
1615 }
1616
/* AVClass installed as DemuxStreamGroup.istg.class so av_log() on an
 * InputStreamGroup prints its log_name (see input_stream_group_item_name). */
static const AVClass input_stream_group_class = {
    .class_name = "InputStreamGroup",
    .version = LIBAVUTIL_VERSION_INT,
    .item_name = input_stream_group_item_name,
    .category = AV_CLASS_CATEGORY_DEMUXER,
};
1623
1624 static DemuxStreamGroup *demux_stream_group_alloc(Demuxer *d, AVStreamGroup *stg)
1625 {
1626 InputFile *f = &d->f;
1627 DemuxStreamGroup *dsg;
1628
1629 dsg = allocate_array_elem(&f->stream_groups, sizeof(*dsg), &f->nb_stream_groups);
1630 if (!dsg)
1631 return NULL;
1632
1633 dsg->istg.stg = stg;
1634 dsg->istg.file = f;
1635 dsg->istg.index = stg->index;
1636 dsg->istg.class = &input_stream_group_class;
1637
1638 snprintf(dsg->log_name, sizeof(dsg->log_name), "istg#%d:%d/%s",
1639 d->f.index, stg->index, avformat_stream_group_name(stg->type));
1640
1641 return dsg;
1642 }
1643
/*
 * Set up an internal filtergraph that reassembles a tile-grid stream
 * group (e.g. HEIF grids) into a single output picture.
 *
 * Builds an xstack graph string placing every tile at its grid offset
 * over the group's background color, optionally followed by cropping to
 * the grid's display area, and creates the filtergraph via fg_create()
 * (which takes ownership of graph_str on success).
 *
 * Groups with a single tile need no reassembly and are skipped.
 * Returns 0 on success, a negative error code on failure.
 */
static int istg_parse_tile_grid(const OptionsContext *o, Demuxer *d, InputStreamGroup *istg)
{
    InputFile *f = &d->f;
    AVFormatContext *ic = d->f.ctx;
    AVStreamGroup *stg = istg->stg;
    const AVStreamGroupTileGrid *tg = stg->params.tile_grid;
    OutputFilterOptions opts;
    AVBPrint bp;
    char *graph_str;
    int autorotate = 1;
    const char *apply_cropping = NULL;
    int ret;

    /* single-tile grid: nothing to stack */
    if (tg->nb_tiles == 1)
        return 0;

    memset(&opts, 0, sizeof(opts));

    opt_match_per_stream_group_int(istg, &o->autorotate, ic, stg, &autorotate);
    if (autorotate)
        opts.flags |= OFILTER_FLAG_AUTOROTATE;

    /* cropping to the display area is on by default; -apply_cropping 0
     * (numeric form only here) disables it */
    opts.flags |= OFILTER_FLAG_CROP;
    opt_match_per_stream_group_str(istg, &o->apply_cropping, ic, stg, &apply_cropping);
    if (apply_cropping) {
        char *p;
        int crop = strtol(apply_cropping, &p, 0);
        if (*p)
            return AVERROR(EINVAL);
        if (!crop)
            opts.flags &= ~OFILTER_FLAG_CROP;
    }

    /* graph string: "[f:g:grp:tile]... xstack=inputs=N:layout=... [f:g:grp]" */
    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_UNLIMITED);
    for (int i = 0; i < tg->nb_tiles; i++)
        av_bprintf(&bp, "[%d:g:%d:%d]", f->index, stg->index, tg->offsets[i].idx);
    av_bprintf(&bp, "xstack=inputs=%d:layout=", tg->nb_tiles);
    for (int i = 0; i < tg->nb_tiles - 1; i++)
        av_bprintf(&bp, "%d_%d|", tg->offsets[i].horizontal,
                   tg->offsets[i].vertical);
    av_bprintf(&bp, "%d_%d:fill=0x%02X%02X%02X@0x%02X", tg->offsets[tg->nb_tiles - 1].horizontal,
               tg->offsets[tg->nb_tiles - 1].vertical,
               tg->background[0], tg->background[1],
               tg->background[2], tg->background[3]);
    av_bprintf(&bp, "[%d:g:%d]", f->index, stg->index);
    ret = av_bprint_finalize(&bp, &graph_str);
    if (ret < 0)
        return ret;

    /* coded size larger than display size => crop the stacked result */
    if (tg->coded_width != tg->width || tg->coded_height != tg->height) {
        opts.crop_top = tg->vertical_offset;
        opts.crop_bottom = tg->coded_height - tg->height - tg->vertical_offset;
        opts.crop_left = tg->horizontal_offset;
        opts.crop_right = tg->coded_width - tg->width - tg->horizontal_offset;
    }

    /* forward group-level side data (EINVAL means "no frame equivalent",
     * which is fine to skip) */
    for (int i = 0; i < tg->nb_coded_side_data; i++) {
        const AVPacketSideData *sd = &tg->coded_side_data[i];

        ret = av_packet_side_data_to_frame(&opts.side_data, &opts.nb_side_data, sd, 0);
        if (ret < 0 && ret != AVERROR(EINVAL))
            goto fail;
    }

    ret = fg_create(NULL, &graph_str, d->sch, &opts);
    if (ret < 0)
        goto fail;

    istg->fg = filtergraphs[nb_filtergraphs-1];
    istg->fg->is_internal = 1;

    ret = 0;
fail:
    if (ret < 0)
        av_freep(&graph_str);

    return ret;
}
1722
1723 static int istg_add(const OptionsContext *o, Demuxer *d, AVStreamGroup *stg)
1724 {
1725 DemuxStreamGroup *dsg;
1726 InputStreamGroup *istg;
1727 int ret;
1728
1729 dsg = demux_stream_group_alloc(d, stg);
1730 if (!dsg)
1731 return AVERROR(ENOMEM);
1732
1733 istg = &dsg->istg;
1734
1735 switch (stg->type) {
1736 case AV_STREAM_GROUP_PARAMS_TILE_GRID:
1737 ret = istg_parse_tile_grid(o, d, istg);
1738 if (ret < 0)
1739 return ret;
1740 break;
1741 default:
1742 break;
1743 }
1744
1745 return 0;
1746 }
1747
1748 static int dump_attachment(InputStream *ist, const char *filename)
1749 {
1750 AVStream *st = ist->st;
1751 int ret;
1752 AVIOContext *out = NULL;
1753 const AVDictionaryEntry *e;
1754
1755 if (!st->codecpar->extradata_size) {
1756 av_log(ist, AV_LOG_WARNING, "No extradata to dump.\n");
1757 return 0;
1758 }
1759 if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
1760 filename = e->value;
1761 if (!*filename) {
1762 av_log(ist, AV_LOG_FATAL, "No filename specified and no 'filename' tag");
1763 return AVERROR(EINVAL);
1764 }
1765
1766 ret = assert_file_overwrite(filename);
1767 if (ret < 0)
1768 return ret;
1769
1770 if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
1771 av_log(ist, AV_LOG_FATAL, "Could not open file %s for writing.\n",
1772 filename);
1773 return ret;
1774 }
1775
1776 avio_write(out, st->codecpar->extradata, st->codecpar->extradata_size);
1777 ret = avio_close(out);
1778
1779 if (ret >= 0)
1780 av_log(ist, AV_LOG_INFO, "Wrote attachment (%d bytes) to '%s'\n",
1781 st->codecpar->extradata_size, filename);
1782
1783 return ret;
1784 }
1785
1786 static const char *input_file_item_name(void *obj)
1787 {
1788 const Demuxer *d = obj;
1789
1790 return d->log_name;
1791 }
1792
/* AVClass installed as Demuxer.f.class so av_log() on an InputFile prints
 * the demuxer's log_name (see input_file_item_name). */
static const AVClass input_file_class = {
    .class_name = "InputFile",
    .version = LIBAVUTIL_VERSION_INT,
    .item_name = input_file_item_name,
    .category = AV_CLASS_CATEGORY_DEMUXER,
};
1799
1800 static Demuxer *demux_alloc(void)
1801 {
1802 Demuxer *d = allocate_array_elem(&input_files, sizeof(*d), &nb_input_files);
1803
1804 if (!d)
1805 return NULL;
1806
1807 d->f.class = &input_file_class;
1808 d->f.index = nb_input_files - 1;
1809
1810 snprintf(d->log_name, sizeof(d->log_name), "in#%d", d->f.index);
1811
1812 return d;
1813 }
1814
1815 int ifile_open(const OptionsContext *o, const char *filename, Scheduler *sch)
1816 {
1817 Demuxer *d;
1818 InputFile *f;
1819 AVFormatContext *ic;
1820 const AVInputFormat *file_iformat = NULL;
1821 int err, ret = 0;
1822 int64_t timestamp;
1823 AVDictionary *opts_used = NULL;
1824 const char* video_codec_name = NULL;
1825 const char* audio_codec_name = NULL;
1826 const char* subtitle_codec_name = NULL;
1827 const char* data_codec_name = NULL;
1828 int scan_all_pmts_set = 0;
1829
1830 int64_t start_time = o->start_time;
1831 int64_t start_time_eof = o->start_time_eof;
1832 int64_t stop_time = o->stop_time;
1833 int64_t recording_time = o->recording_time;
1834
1835 d = demux_alloc();
1836 if (!d)
1837 return AVERROR(ENOMEM);
1838
1839 f = &d->f;
1840
1841 ret = sch_add_demux(sch, input_thread, d);
1842 if (ret < 0)
1843 return ret;
1844 d->sch = sch;
1845
1846 if (stop_time != INT64_MAX && recording_time != INT64_MAX) {
1847 stop_time = INT64_MAX;
1848 av_log(d, AV_LOG_WARNING, "-t and -to cannot be used together; using -t.\n");
1849 }
1850
1851 if (stop_time != INT64_MAX && recording_time == INT64_MAX) {
1852 int64_t start = start_time == AV_NOPTS_VALUE ? 0 : start_time;
1853 if (stop_time <= start) {
1854 av_log(d, AV_LOG_ERROR, "-to value smaller than -ss; aborting.\n");
1855 return AVERROR(EINVAL);
1856 } else {
1857 recording_time = stop_time - start;
1858 }
1859 }
1860
1861 if (o->format) {
1862 if (!(file_iformat = av_find_input_format(o->format))) {
1863 av_log(d, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
1864 return AVERROR(EINVAL);
1865 }
1866 }
1867
1868 if (!strcmp(filename, "-"))
1869 filename = "fd:";
1870
1871 stdin_interaction &= strncmp(filename, "pipe:", 5) &&
1872 strcmp(filename, "fd:") &&
1873 strcmp(filename, "/dev/stdin");
1874
1875 /* get default parameters from command line */
1876 ic = avformat_alloc_context();
1877 if (!ic)
1878 return AVERROR(ENOMEM);
1879 if (o->audio_sample_rate.nb_opt) {
1880 av_dict_set_int(&o->g->format_opts, "sample_rate", o->audio_sample_rate.opt[o->audio_sample_rate.nb_opt - 1].u.i, 0);
1881 }
1882 if (o->audio_channels.nb_opt) {
1883 const AVClass *priv_class;
1884 if (file_iformat && (priv_class = file_iformat->priv_class) &&
1885 av_opt_find(&priv_class, "ch_layout", NULL, 0,
1886 AV_OPT_SEARCH_FAKE_OBJ)) {
1887 char buf[32];
1888 snprintf(buf, sizeof(buf), "%dC", o->audio_channels.opt[o->audio_channels.nb_opt - 1].u.i);
1889 av_dict_set(&o->g->format_opts, "ch_layout", buf, 0);
1890 }
1891 }
1892 if (o->audio_ch_layouts.nb_opt) {
1893 const AVClass *priv_class;
1894 if (file_iformat && (priv_class = file_iformat->priv_class) &&
1895 av_opt_find(&priv_class, "ch_layout", NULL, 0,
1896 AV_OPT_SEARCH_FAKE_OBJ)) {
1897 av_dict_set(&o->g->format_opts, "ch_layout", o->audio_ch_layouts.opt[o->audio_ch_layouts.nb_opt - 1].u.str, 0);
1898 }
1899 }
1900 if (o->frame_rates.nb_opt) {
1901 const AVClass *priv_class;
1902 /* set the format-level framerate option;
1903 * this is important for video grabbers, e.g. x11 */
1904 if (file_iformat && (priv_class = file_iformat->priv_class) &&
1905 av_opt_find(&priv_class, "framerate", NULL, 0,
1906 AV_OPT_SEARCH_FAKE_OBJ)) {
1907 av_dict_set(&o->g->format_opts, "framerate",
1908 o->frame_rates.opt[o->frame_rates.nb_opt - 1].u.str, 0);
1909 }
1910 }
1911 if (o->frame_sizes.nb_opt) {
1912 av_dict_set(&o->g->format_opts, "video_size", o->frame_sizes.opt[o->frame_sizes.nb_opt - 1].u.str, 0);
1913 }
1914 if (o->frame_pix_fmts.nb_opt)
1915 av_dict_set(&o->g->format_opts, "pixel_format", o->frame_pix_fmts.opt[o->frame_pix_fmts.nb_opt - 1].u.str, 0);
1916
1917 video_codec_name = opt_match_per_type_str(&o->codec_names, 'v');
1918 audio_codec_name = opt_match_per_type_str(&o->codec_names, 'a');
1919 subtitle_codec_name = opt_match_per_type_str(&o->codec_names, 's');
1920 data_codec_name = opt_match_per_type_str(&o->codec_names, 'd');
1921
1922 if (video_codec_name)
1923 ret = err_merge(ret, find_codec(NULL, video_codec_name , AVMEDIA_TYPE_VIDEO , 0,
1924 &ic->video_codec));
1925 if (audio_codec_name)
1926 ret = err_merge(ret, find_codec(NULL, audio_codec_name , AVMEDIA_TYPE_AUDIO , 0,
1927 &ic->audio_codec));
1928 if (subtitle_codec_name)
1929 ret = err_merge(ret, find_codec(NULL, subtitle_codec_name, AVMEDIA_TYPE_SUBTITLE, 0,
1930 &ic->subtitle_codec));
1931 if (data_codec_name)
1932 ret = err_merge(ret, find_codec(NULL, data_codec_name , AVMEDIA_TYPE_DATA, 0,
1933 &ic->data_codec));
1934 if (ret < 0) {
1935 avformat_free_context(ic);
1936 return ret;
1937 }
1938
1939 ic->video_codec_id = video_codec_name ? ic->video_codec->id : AV_CODEC_ID_NONE;
1940 ic->audio_codec_id = audio_codec_name ? ic->audio_codec->id : AV_CODEC_ID_NONE;
1941 ic->subtitle_codec_id = subtitle_codec_name ? ic->subtitle_codec->id : AV_CODEC_ID_NONE;
1942 ic->data_codec_id = data_codec_name ? ic->data_codec->id : AV_CODEC_ID_NONE;
1943
1944 ic->flags |= AVFMT_FLAG_NONBLOCK;
1945 if (o->bitexact)
1946 ic->flags |= AVFMT_FLAG_BITEXACT;
1947 ic->interrupt_callback = int_cb;
1948
1949 if (!av_dict_get(o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
1950 av_dict_set(&o->g->format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
1951 scan_all_pmts_set = 1;
1952 }
1953 /* open the input file with generic avformat function */
    err = avformat_open_input(&ic, filename, file_iformat, &o->g->format_opts);
    if (err < 0) {
        // AVERROR_EXIT means the user aborted; do not print a redundant error
        if (err != AVERROR_EXIT)
            av_log(d, AV_LOG_ERROR,
                   "Error opening input: %s\n", av_err2str(err));
        if (err == AVERROR_PROTOCOL_NOT_FOUND)
            av_log(d, AV_LOG_ERROR, "Did you mean file:%s?\n", filename);
        return err;
    }
    f->ctx = ic;

    // append the demuxer name to this input's log prefix
    av_strlcat(d->log_name, "/", sizeof(d->log_name));
    av_strlcat(d->log_name, ic->iformat->name, sizeof(d->log_name));

    // drop the scan_all_pmts entry again so it is not flagged as an unused
    // user option below (presumably it was injected before the open call,
    // outside this view — TODO confirm against the code above)
    if (scan_all_pmts_set)
        av_dict_set(&o->g->format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
    remove_avoptions(&o->g->format_opts, o->g->codec_opts);

    // any options still left in format_opts were not recognized — error out
    ret = check_avoptions(o->g->format_opts);
    if (ret < 0)
        return ret;

    /* apply forced codec ids */
    for (int i = 0; i < ic->nb_streams; i++) {
        const AVCodec *dummy;
        ret = choose_decoder(o, f, ic, ic->streams[i], HWACCEL_NONE, AV_HWDEVICE_TYPE_NONE,
                             &dummy);
        if (ret < 0)
            return ret;
    }

    if (o->find_stream_info) {
        AVDictionary **opts;
        // remember the pre-probe stream count: probing may add streams, and
        // only the original ones get an options dict in the array below
        int orig_nb_streams = ic->nb_streams;

        // build one codec-options dict per stream for the probe call
        ret = setup_find_stream_info_opts(ic, o->g->codec_opts, &opts);
        if (ret < 0)
            return ret;

        /* If not enough info to get the stream parameters, we decode the
           first frames to get it. (used in mpeg case for example) */
        ret = avformat_find_stream_info(ic, opts);

        // free the per-stream dicts regardless of the probe result
        for (int i = 0; i < orig_nb_streams; i++)
            av_dict_free(&opts[i]);
        av_freep(&opts);

        if (ret < 0) {
            av_log(d, AV_LOG_FATAL, "could not find codec parameters\n");
            // if at least one stream was discovered, continue best-effort;
            // only a completely streamless file is fatal here
            if (ic->nb_streams == 0)
                return ret;
        }
    }

    // -ss and -sseof are mutually exclusive; -ss takes precedence
    if (start_time != AV_NOPTS_VALUE && start_time_eof != AV_NOPTS_VALUE) {
        av_log(d, AV_LOG_WARNING, "Cannot use -ss and -sseof both, using -ss\n");
        start_time_eof = AV_NOPTS_VALUE;
    }

    // translate -sseof (a negative offset from end-of-file) into an absolute
    // start_time, when the container reports a duration
    if (start_time_eof != AV_NOPTS_VALUE) {
        if (start_time_eof >= 0) {
            av_log(d, AV_LOG_ERROR, "-sseof value must be negative; aborting\n");
            return AVERROR(EINVAL);
        }
        if (ic->duration > 0) {
            start_time = start_time_eof + ic->duration;
            if (start_time < 0) {
                av_log(d, AV_LOG_WARNING, "-sseof value seeks to before start of file; ignored\n");
                start_time = AV_NOPTS_VALUE;
            }
        } else
            av_log(d, AV_LOG_WARNING, "Cannot use -sseof, file duration not known\n");
    }
    timestamp = (start_time == AV_NOPTS_VALUE) ? 0 : start_time;
    /* add the stream start time */
    if (!o->seek_timestamp && ic->start_time != AV_NOPTS_VALUE)
        timestamp += ic->start_time;

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t seek_timestamp = timestamp;

        if (!(ic->iformat->flags & AVFMT_SEEK_TO_PTS)) {
            int dts_heuristic = 0;
            // video_delay > 0 indicates frame reordering (B-frames), so DTS
            // can lag PTS and a DTS-based seek could land past the target
            for (int i = 0; i < ic->nb_streams; i++) {
                const AVCodecParameters *par = ic->streams[i]->codecpar;
                if (par->video_delay) {
                    dts_heuristic = 1;
                    break;
                }
            }
            if (dts_heuristic) {
                // back the seek target off by roughly three frame durations
                // at ~23 fps so the requested PTS is not overshot (heuristic)
                seek_timestamp -= 3*AV_TIME_BASE / 23;
            }
        }
        ret = avformat_seek_file(ic, -1, INT64_MIN, seek_timestamp, seek_timestamp, 0);
        if (ret < 0) {
            // a failed seek is non-fatal: demuxing simply starts earlier
            av_log(d, AV_LOG_WARNING, "could not seek to position %0.3f\n",
                   (double)timestamp / AV_TIME_BASE);
        }
    }

    f->start_time = start_time;
    d->recording_time = recording_time;
    f->input_sync_ref = o->input_sync_ref;
    f->input_ts_offset = o->input_ts_offset;
    // with -copyts, keep original timestamps (optionally rebased to zero via
    // -start_at_zero); otherwise shift so that `timestamp` maps to zero
    f->ts_offset = o->input_ts_offset - (copy_ts ? (start_at_zero && ic->start_time != AV_NOPTS_VALUE ? ic->start_time : 0) : timestamp);
    d->accurate_seek = o->accurate_seek;
    d->loop = o->loop;
    d->nb_streams_warn = ic->nb_streams;

    d->duration = (Timestamp){ .ts = 0, .tb = (AVRational){ 1, 1 } };
    // min/max pts start out unset; presumably updated as packets are demuxed
    // (maintained elsewhere in this file — outside this view)
    d->min_pts = (Timestamp){ .ts = AV_NOPTS_VALUE, .tb = (AVRational){ 1, 1 } };
    d->max_pts = (Timestamp){ .ts = AV_NOPTS_VALUE, .tb = (AVRational){ 1, 1 } };

    // -readrate throttles demuxing to the given multiple of realtime; 0 = off
    d->readrate = o->readrate ? o->readrate : 0.0;
    if (d->readrate < 0.0f) {
        av_log(d, AV_LOG_ERROR, "Option -readrate is %0.3f; it must be non-negative.\n", d->readrate);
        return AVERROR(EINVAL);
    }
    if (o->rate_emu) {
        if (d->readrate) {
            // explicit -readrate wins over -re
            av_log(d, AV_LOG_WARNING, "Both -readrate and -re set. Using -readrate %0.3f.\n", d->readrate);
        } else
            // -re is shorthand for reading at exactly realtime
            d->readrate = 1.0f;
    }

    if (d->readrate) {
        // allow an initial unthrottled burst (default: 0.5 seconds of input)
        d->readrate_initial_burst = o->readrate_initial_burst ? o->readrate_initial_burst : 0.5;
        if (d->readrate_initial_burst < 0.0) {
            av_log(d, AV_LOG_ERROR,
                   "Option -readrate_initial_burst is %0.3f; it must be non-negative.\n",
                   d->readrate_initial_burst);
            return AVERROR(EINVAL);
        }
        // rate used to catch up after falling behind; defaults to 5% above
        // the base rate and must never be slower than it
        d->readrate_catchup = o->readrate_catchup ? o->readrate_catchup : d->readrate * 1.05;
        if (d->readrate_catchup < d->readrate) {
            av_log(d, AV_LOG_ERROR,
                   "Option -readrate_catchup is %0.3f; it must be at least equal to %0.3f.\n",
                   d->readrate_catchup, d->readrate);
            return AVERROR(EINVAL);
        }
    } else {
        // readrate sub-options are meaningless without -readrate/-re
        if (o->readrate_initial_burst) {
            av_log(d, AV_LOG_WARNING, "Option -readrate_initial_burst ignored "
                   "since neither -readrate nor -re were given\n");
        }
        if (o->readrate_catchup) {
            av_log(d, AV_LOG_WARNING, "Option -readrate_catchup ignored "
                   "since neither -readrate nor -re were given\n");
        }
    }

    /* Add all the streams from the given input file to the demuxer */
    for (int i = 0; i < ic->nb_streams; i++) {
        ret = ist_add(o, d, ic->streams[i], &opts_used);
        if (ret < 0) {
            av_dict_free(&opts_used);
            return ret;
        }
    }

    /* Add all the stream groups from the given input file to the demuxer */
    for (int i = 0; i < ic->nb_stream_groups; i++) {
        ret = istg_add(o, d, ic->stream_groups[i]);
        // NOTE(review): unlike the ist_add() loop above, this error path does
        // not free opts_used — looks like a possible leak; verify upstream
        if (ret < 0)
            return ret;
    }

    /* dump the file content */
    av_dump_format(ic, f->index, filename, 0);

    /* check if all codec options have been used */
    ret = check_avoptions_used(o->g->codec_opts, opts_used, d, 1);
    av_dict_free(&opts_used);
    if (ret < 0)
        return ret;

    // write out any attachments (e.g. fonts) requested via -dump_attachment,
    // matching each specifier against every input stream
    for (int i = 0; i < o->dump_attachment.nb_opt; i++) {
        for (int j = 0; j < f->nb_streams; j++) {
            InputStream *ist = f->streams[j];

            if (check_stream_specifier(ic, ist->st, o->dump_attachment.opt[i].specifier) == 1) {
                ret = dump_attachment(ist, o->dump_attachment.opt[i].u.str);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return 0;
}