FFmpeg 2.6.3
ffmpeg.c
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdint.h>
34 
35 #if HAVE_ISATTY
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 #endif
43 
44 #include "libavformat/avformat.h"
45 #include "libavdevice/avdevice.h"
47 #include "libavutil/opt.h"
49 #include "libavutil/parseutils.h"
50 #include "libavutil/samplefmt.h"
51 #include "libavutil/fifo.h"
52 #include "libavutil/intreadwrite.h"
53 #include "libavutil/dict.h"
54 #include "libavutil/mathematics.h"
55 #include "libavutil/pixdesc.h"
56 #include "libavutil/avstring.h"
57 #include "libavutil/libm.h"
58 #include "libavutil/imgutils.h"
59 #include "libavutil/timestamp.h"
60 #include "libavutil/bprint.h"
61 #include "libavutil/time.h"
63 #include "libavformat/os_support.h"
64 
65 # include "libavfilter/avcodec.h"
66 # include "libavfilter/avfilter.h"
67 # include "libavfilter/buffersrc.h"
68 # include "libavfilter/buffersink.h"
69 
70 #if HAVE_SYS_RESOURCE_H
71 #include <sys/time.h>
72 #include <sys/types.h>
73 #include <sys/resource.h>
74 #elif HAVE_GETPROCESSTIMES
75 #include <windows.h>
76 #endif
77 #if HAVE_GETPROCESSMEMORYINFO
78 #include <windows.h>
79 #include <psapi.h>
80 #endif
81 
82 #if HAVE_SYS_SELECT_H
83 #include <sys/select.h>
84 #endif
85 
86 #if HAVE_TERMIOS_H
87 #include <fcntl.h>
88 #include <sys/ioctl.h>
89 #include <sys/time.h>
90 #include <termios.h>
91 #elif HAVE_KBHIT
92 #include <conio.h>
93 #endif
94 
95 #if HAVE_PTHREADS
96 #include <pthread.h>
97 #endif
98 
99 #include <time.h>
100 
101 #include "ffmpeg.h"
102 #include "cmdutils.h"
103 
104 #include "libavutil/avassert.h"
105 
106 const char program_name[] = "ffmpeg";
107 const int program_birth_year = 2000;
108 
109 static FILE *vstats_file;
110 
111 const char *const forced_keyframes_const_names[] = {
112  "n",
113  "n_forced",
114  "prev_forced_n",
115  "prev_forced_t",
116  "t",
117  NULL
118 };
119 
120 static void do_video_stats(OutputStream *ost, int frame_size);
121 static int64_t getutime(void);
122 static int64_t getmaxrss(void);
123 
124 static int run_as_daemon = 0;
125 static int nb_frames_dup = 0;
126 static int nb_frames_drop = 0;
127 static int64_t decode_error_stat[2];
128 
129 static int current_time;
130 AVIOContext *progress_avio = NULL;
131 
132 static uint8_t *subtitle_out;
133 
134 #define DEFAULT_PASS_LOGFILENAME_PREFIX "ffmpeg2pass"
135 
136 InputStream **input_streams = NULL;
137 int        nb_input_streams = 0;
138 InputFile   **input_files   = NULL;
139 int        nb_input_files   = 0;
140 
141 OutputStream **output_streams = NULL;
142 int         nb_output_streams = 0;
143 OutputFile   **output_files   = NULL;
144 int         nb_output_files   = 0;
145 
146 FilterGraph **filtergraphs;
147 int        nb_filtergraphs;
148 
149 #if HAVE_TERMIOS_H
150 
151 /* init terminal so that we can grab keys */
152 static struct termios oldtty;
153 static int restore_tty;
154 #endif
155 
156 #if HAVE_PTHREADS
157 static void free_input_threads(void);
158 #endif
159 
160 /* sub2video hack:
161  Convert subtitles to video with alpha to insert them in filter graphs.
162  This is a temporary solution until libavfilter gets real subtitles support.
163  */
164 
165 static int sub2video_get_blank_frame(InputStream *ist)
166 {
167  int ret;
168  AVFrame *frame = ist->sub2video.frame;
169 
170  av_frame_unref(frame);
171  ist->sub2video.frame->width = ist->sub2video.w;
172  ist->sub2video.frame->height = ist->sub2video.h;
173  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
174  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
175  return ret;
176  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
177  return 0;
178 }
179 
180 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
181  AVSubtitleRect *r)
182 {
183  uint32_t *pal, *dst2;
184  uint8_t *src, *src2;
185  int x, y;
186 
187  if (r->type != SUBTITLE_BITMAP) {
188  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
189  return;
190  }
191  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
192  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle overflowing\n");
193  return;
194  }
195 
196  dst += r->y * dst_linesize + r->x * 4;
197  src = r->pict.data[0];
198  pal = (uint32_t *)r->pict.data[1];
199  for (y = 0; y < r->h; y++) {
200  dst2 = (uint32_t *)dst;
201  src2 = src;
202  for (x = 0; x < r->w; x++)
203  *(dst2++) = pal[*(src2++)];
204  dst += dst_linesize;
205  src += r->pict.linesize[0];
206  }
207 }
208 
209 static void sub2video_push_ref(InputStream *ist, int64_t pts)
210 {
211  AVFrame *frame = ist->sub2video.frame;
212  int i;
213 
214  av_assert1(frame->data[0]);
215  ist->sub2video.last_pts = frame->pts = pts;
216  for (i = 0; i < ist->nb_filters; i++)
217  av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
218  AV_BUFFERSRC_FLAG_KEEP_REF |
219  AV_BUFFERSRC_FLAG_PUSH);
220 }
221 
222 static void sub2video_update(InputStream *ist, AVSubtitle *sub)
223 {
224  int w = ist->sub2video.w, h = ist->sub2video.h;
225  AVFrame *frame = ist->sub2video.frame;
226  int8_t *dst;
227  int dst_linesize;
228  int num_rects, i;
229  int64_t pts, end_pts;
230 
231  if (!frame)
232  return;
233  if (sub) {
234  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
235  AV_TIME_BASE_Q, ist->st->time_base);
236  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
237  AV_TIME_BASE_Q, ist->st->time_base);
238  num_rects = sub->num_rects;
239  } else {
240  pts = ist->sub2video.end_pts;
241  end_pts = INT64_MAX;
242  num_rects = 0;
243  }
244  if (sub2video_get_blank_frame(ist) < 0) {
245  av_log(ist->dec_ctx, AV_LOG_ERROR,
246  "Impossible to get a blank canvas.\n");
247  return;
248  }
249  dst = frame->data [0];
250  dst_linesize = frame->linesize[0];
251  for (i = 0; i < num_rects; i++)
252  sub2video_copy_rect(dst, dst_linesize, w, h, sub->rects[i]);
253  sub2video_push_ref(ist, pts);
254  ist->sub2video.end_pts = end_pts;
255 }
256 
257 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
258 {
259  InputFile *infile = input_files[ist->file_index];
260  int i, j, nb_reqs;
261  int64_t pts2;
262 
263  /* When a frame is read from a file, examine all sub2video streams in
264  the same file and send the sub2video frame again. Otherwise, decoded
265  video frames could be accumulating in the filter graph while a filter
266  (possibly overlay) is desperately waiting for a subtitle frame. */
267  for (i = 0; i < infile->nb_streams; i++) {
268  InputStream *ist2 = input_streams[infile->ist_index + i];
269  if (!ist2->sub2video.frame)
270  continue;
271  /* subtitles seem to be usually muxed ahead of other streams;
272  if not, subtracting a larger time here is necessary */
273  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
274  /* do not send the heartbeat frame if the subtitle is already ahead */
275  if (pts2 <= ist2->sub2video.last_pts)
276  continue;
277  if (pts2 >= ist2->sub2video.end_pts || !ist2->sub2video.frame->data[0])
278  sub2video_update(ist2, NULL);
279  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
280  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
281  if (nb_reqs)
282  sub2video_push_ref(ist2, pts2);
283  }
284 }
285 
286 static void sub2video_flush(InputStream *ist)
287 {
288  int i;
289 
290  if (ist->sub2video.end_pts < INT64_MAX)
291  sub2video_update(ist, NULL);
292  for (i = 0; i < ist->nb_filters; i++)
293  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
294 }
295 
296 /* end of sub2video hack */
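As an aside, the rectangle blit above is just a per-pixel palette lookup; a minimal standalone sketch of the same idea (not part of ffmpeg.c, names are hypothetical) looks like this:

#include <stdint.h>

/* Expand one PAL8 subtitle rectangle into a 32-bit RGBA canvas, the same
 * palette lookup that sub2video_copy_rect() performs above. */
static void blit_pal8_to_rgb32(uint8_t *dst, int dst_linesize,
                               const uint8_t *src, int src_linesize,
                               const uint32_t *pal, int w, int h)
{
    for (int y = 0; y < h; y++) {
        uint32_t      *out = (uint32_t *)(dst + y * dst_linesize);
        const uint8_t *in  = src + y * src_linesize;
        for (int x = 0; x < w; x++)
            out[x] = pal[in[x]];   /* palette entries already carry alpha */
    }
}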
297 
298 static void term_exit_sigsafe(void)
299 {
300 #if HAVE_TERMIOS_H
301  if(restore_tty)
302  tcsetattr (0, TCSANOW, &oldtty);
303 #endif
304 }
305 
306 void term_exit(void)
307 {
308  av_log(NULL, AV_LOG_QUIET, "%s", "");
309  term_exit_sigsafe();
310 }
311 
312 static volatile int received_sigterm = 0;
313 static volatile int received_nb_signals = 0;
314 static volatile int transcode_init_done = 0;
315 static int main_return_code = 0;
316 
317 static void
318 sigterm_handler(int sig)
319 {
320  received_sigterm = sig;
321  received_nb_signals++;
322  term_exit_sigsafe();
323  if(received_nb_signals > 3)
324  exit(123);
325 }
326 
327 void term_init(void)
328 {
329 #if HAVE_TERMIOS_H
330  if(!run_as_daemon){
331  struct termios tty;
332  int istty = 1;
333 #if HAVE_ISATTY
334  istty = isatty(0) && isatty(2);
335 #endif
336  if (istty && tcgetattr (0, &tty) == 0) {
337  oldtty = tty;
338  restore_tty = 1;
339 
340  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
341  |INLCR|IGNCR|ICRNL|IXON);
342  tty.c_oflag |= OPOST;
343  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
344  tty.c_cflag &= ~(CSIZE|PARENB);
345  tty.c_cflag |= CS8;
346  tty.c_cc[VMIN] = 1;
347  tty.c_cc[VTIME] = 0;
348 
349  tcsetattr (0, TCSANOW, &tty);
350  }
351  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
352  }
353 #endif
354 
355  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
356  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
357 #ifdef SIGXCPU
358  signal(SIGXCPU, sigterm_handler);
359 #endif
360 }
361 
362 /* read a key without blocking */
363 static int read_key(void)
364 {
365  unsigned char ch;
366 #if HAVE_TERMIOS_H
367  int n = 1;
368  struct timeval tv;
369  fd_set rfds;
370 
371  FD_ZERO(&rfds);
372  FD_SET(0, &rfds);
373  tv.tv_sec = 0;
374  tv.tv_usec = 0;
375  n = select(1, &rfds, NULL, NULL, &tv);
376  if (n > 0) {
377  n = read(0, &ch, 1);
378  if (n == 1)
379  return ch;
380 
381  return n;
382  }
383 #elif HAVE_KBHIT
384 # if HAVE_PEEKNAMEDPIPE
385  static int is_pipe;
386  static HANDLE input_handle;
387  DWORD dw, nchars;
388  if(!input_handle){
389  input_handle = GetStdHandle(STD_INPUT_HANDLE);
390  is_pipe = !GetConsoleMode(input_handle, &dw);
391  }
392 
393  if (stdin->_cnt > 0) {
394  read(0, &ch, 1);
395  return ch;
396  }
397  if (is_pipe) {
398  /* When running under a GUI, you will end here. */
399  if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
400  // input pipe may have been closed by the program that ran ffmpeg
401  return -1;
402  }
403  //Read it
404  if(nchars != 0) {
405  read(0, &ch, 1);
406  return ch;
407  }else{
408  return -1;
409  }
410  }
411 # endif
412  if(kbhit())
413  return(getch());
414 #endif
415  return -1;
416 }
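For reference, the terminal handling in term_init()/read_key() reduces to the following POSIX-only sketch: put stdin into non-canonical mode, then poll it with select() before reading a byte. This is a simplified, assumption-laden example, not the actual ffmpeg code path (the Windows branches are omitted):

#include <sys/select.h>
#include <termios.h>
#include <unistd.h>

static struct termios saved_tty;

static void raw_mode_on(void)
{
    struct termios tty;
    tcgetattr(0, &tty);
    saved_tty = tty;                    /* restore this on exit */
    tty.c_lflag &= ~(ECHO | ICANON);    /* no echo, byte-at-a-time input */
    tty.c_cc[VMIN]  = 1;
    tty.c_cc[VTIME] = 0;
    tcsetattr(0, TCSANOW, &tty);
}

static int poll_key(void)               /* returns -1 if no key is pending */
{
    fd_set rfds;
    struct timeval tv = { 0, 0 };        /* do not block */
    unsigned char ch;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    if (select(1, &rfds, NULL, NULL, &tv) > 0 && read(0, &ch, 1) == 1)
        return ch;
    return -1;
}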
417 
418 static int decode_interrupt_cb(void *ctx)
419 {
420  return received_nb_signals > transcode_init_done;
421 }
422 
423 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
424 
425 static void ffmpeg_cleanup(int ret)
426 {
427  int i, j;
428 
429  if (do_benchmark) {
430  int maxrss = getmaxrss() / 1024;
431  printf("bench: maxrss=%ikB\n", maxrss);
432  }
433 
434  for (i = 0; i < nb_filtergraphs; i++) {
435  FilterGraph *fg = filtergraphs[i];
437  for (j = 0; j < fg->nb_inputs; j++) {
438  av_freep(&fg->inputs[j]->name);
439  av_freep(&fg->inputs[j]);
440  }
441  av_freep(&fg->inputs);
442  for (j = 0; j < fg->nb_outputs; j++) {
443  av_freep(&fg->outputs[j]->name);
444  av_freep(&fg->outputs[j]);
445  }
446  av_freep(&fg->outputs);
447  av_freep(&fg->graph_desc);
448 
449  av_freep(&filtergraphs[i]);
450  }
451  av_freep(&filtergraphs);
452 
454 
455  /* close files */
456  for (i = 0; i < nb_output_files; i++) {
457  OutputFile *of = output_files[i];
458  AVFormatContext *s = of->ctx;
459  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
460  avio_closep(&s->pb);
462  av_dict_free(&of->opts);
463 
464  av_freep(&output_files[i]);
465  }
466  for (i = 0; i < nb_output_streams; i++) {
467  OutputStream *ost = output_streams[i];
469  while (bsfc) {
470  AVBitStreamFilterContext *next = bsfc->next;
472  bsfc = next;
473  }
474  ost->bitstream_filters = NULL;
476  av_frame_free(&ost->last_frame);
477 
478  av_parser_close(ost->parser);
479 
480  av_freep(&ost->forced_keyframes);
482  av_freep(&ost->avfilter);
483  av_freep(&ost->logfile_prefix);
484 
486  ost->audio_channels_mapped = 0;
487 
489 
490  av_freep(&output_streams[i]);
491  }
492 #if HAVE_PTHREADS
494 #endif
495  for (i = 0; i < nb_input_files; i++) {
496  avformat_close_input(&input_files[i]->ctx);
497  av_freep(&input_files[i]);
498  }
499  for (i = 0; i < nb_input_streams; i++) {
500  InputStream *ist = input_streams[i];
501 
504  av_dict_free(&ist->decoder_opts);
507  av_freep(&ist->filters);
508  av_freep(&ist->hwaccel_device);
509 
511 
512  av_freep(&input_streams[i]);
513  }
514 
515  if (vstats_file)
516  fclose(vstats_file);
518 
519  av_freep(&input_streams);
520  av_freep(&input_files);
521  av_freep(&output_streams);
522  av_freep(&output_files);
523 
524  uninit_opts();
525 
527 
528  if (received_sigterm) {
529  av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
530  (int) received_sigterm);
531  } else if (ret && transcode_init_done) {
532  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
533  }
534  term_exit();
535 }
536 
537 static void remove_avoptions(AVDictionary **a, AVDictionary *b)
538 {
539  AVDictionaryEntry *t = NULL;
540 
541  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
542  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
543  }
544 }
545 
546 static void assert_avoptions(AVDictionary *m)
547 {
548  AVDictionaryEntry *t;
549  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
550  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
551  exit_program(1);
552  }
553 }
554 
555 static void abort_codec_experimental(AVCodec *c, int encoder)
556 {
557  exit_program(1);
558 }
559 
560 static void update_benchmark(const char *fmt, ...)
561 {
562  if (do_benchmark_all) {
563  int64_t t = getutime();
564  va_list va;
565  char buf[1024];
566 
567  if (fmt) {
568  va_start(va, fmt);
569  vsnprintf(buf, sizeof(buf), fmt, va);
570  va_end(va);
571  printf("bench: %8"PRIu64" %s \n", t - current_time, buf);
572  }
573  current_time = t;
574  }
575 }
576 
577 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
578 {
579  int i;
580  for (i = 0; i < nb_output_streams; i++) {
581  OutputStream *ost2 = output_streams[i];
582  ost2->finished |= ost == ost2 ? this_stream : others;
583  }
584 }
585 
586 static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
587 {
588  AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
589  AVCodecContext *avctx = ost->st->codec;
590  int ret;
591 
592  if (!ost->st->codec->extradata_size && ost->enc_ctx->extradata_size) {
593  ost->st->codec->extradata = av_mallocz(ost->enc_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
594  if (ost->st->codec->extradata) {
595  memcpy(ost->st->codec->extradata, ost->enc_ctx->extradata, ost->enc_ctx->extradata_size);
596  ost->st->codec->extradata_size = ost->enc_ctx->extradata_size;
597  }
598  }
599 
600  if ((ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
601  (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
602  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
603 
604  /*
605  * Audio encoders may split the packets -- #frames in != #packets out.
606  * But there is no reordering, so we can limit the number of output packets
607  * by simply dropping them here.
608  * Counting encoded video frames needs to be done separately because of
609  * reordering, see do_video_out()
610  */
611  if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
612  if (ost->frame_number >= ost->max_frames) {
613  av_free_packet(pkt);
614  return;
615  }
616  ost->frame_number++;
617  }
618 
619  if (bsfc)
620  av_packet_split_side_data(pkt);
621 
622  while (bsfc) {
623  AVPacket new_pkt = *pkt;
624  AVDictionaryEntry *bsf_arg = av_dict_get(ost->bsf_args,
625  bsfc->filter->name,
626  NULL, 0);
627  int a = av_bitstream_filter_filter(bsfc, avctx,
628  bsf_arg ? bsf_arg->value : NULL,
629  &new_pkt.data, &new_pkt.size,
630  pkt->data, pkt->size,
631  pkt->flags & AV_PKT_FLAG_KEY);
632  if(a == 0 && new_pkt.data != pkt->data && new_pkt.destruct) {
633  uint8_t *t = av_malloc(new_pkt.size + FF_INPUT_BUFFER_PADDING_SIZE); //the new should be a subset of the old so cannot overflow
634  if(t) {
635  memcpy(t, new_pkt.data, new_pkt.size);
636  memset(t + new_pkt.size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
637  new_pkt.data = t;
638  new_pkt.buf = NULL;
639  a = 1;
640  } else
641  a = AVERROR(ENOMEM);
642  }
643  if (a > 0) {
644  pkt->side_data = NULL;
645  pkt->side_data_elems = 0;
646  av_free_packet(pkt);
647  new_pkt.buf = av_buffer_create(new_pkt.data, new_pkt.size,
648  av_buffer_default_free, NULL, 0);
649  if (!new_pkt.buf)
650  exit_program(1);
651  } else if (a < 0) {
652  av_log(NULL, AV_LOG_ERROR, "Failed to open bitstream filter %s for stream %d with codec %s",
653  bsfc->filter->name, pkt->stream_index,
654  avctx->codec ? avctx->codec->name : "copy");
655  print_error("", a);
656  if (exit_on_error)
657  exit_program(1);
658  }
659  *pkt = new_pkt;
660 
661  bsfc = bsfc->next;
662  }
663 
664  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
665  if (pkt->dts != AV_NOPTS_VALUE &&
666  pkt->pts != AV_NOPTS_VALUE &&
667  pkt->dts > pkt->pts) {
668  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
669  pkt->dts, pkt->pts,
670  ost->file_index, ost->st->index);
671  pkt->pts =
672  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
673  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
674  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
675  }
676  if(
677  (avctx->codec_type == AVMEDIA_TYPE_AUDIO || avctx->codec_type == AVMEDIA_TYPE_VIDEO) &&
678  pkt->dts != AV_NOPTS_VALUE &&
679  ost->last_mux_dts != AV_NOPTS_VALUE) {
680  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
681  if (pkt->dts < max) {
682  int loglevel = max - pkt->dts > 2 || avctx->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
683  av_log(s, loglevel, "Non-monotonous DTS in output stream "
684  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
685  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
686  if (exit_on_error) {
687  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
688  exit_program(1);
689  }
690  av_log(s, loglevel, "changing to %"PRId64". This may result "
691  "in incorrect timestamps in the output file.\n",
692  max);
693  if(pkt->pts >= pkt->dts)
694  pkt->pts = FFMAX(pkt->pts, max);
695  pkt->dts = max;
696  }
697  }
698  }
699  ost->last_mux_dts = pkt->dts;
700 
701  ost->data_size += pkt->size;
702  ost->packets_written++;
703 
704  pkt->stream_index = ost->index;
705 
706  if (debug_ts) {
707  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
708  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
710  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
711  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
712  pkt->size
713  );
714  }
715 
716  ret = av_interleaved_write_frame(s, pkt);
717  if (ret < 0) {
718  print_error("av_interleaved_write_frame()", ret);
719  main_return_code = 1;
720  close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
721  }
722  av_free_packet(pkt);
723 }
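The DTS clamping above can be hard to follow; here is a hedged sketch of just that step as a local helper (not the actual write_frame() code, names are illustrative):

#include <libavcodec/avcodec.h>
#include <libavutil/common.h>

/* Enforce (strictly or non-strictly) increasing DTS before muxing and keep
 * PTS >= DTS, the same fix-up write_frame() applies. */
static void force_monotonic_dts(AVPacket *pkt, int64_t *last_mux_dts, int nonstrict)
{
    if (pkt->dts == AV_NOPTS_VALUE || *last_mux_dts == AV_NOPTS_VALUE) {
        *last_mux_dts = pkt->dts;
        return;
    }
    int64_t min_dts = *last_mux_dts + !nonstrict;  /* equality allowed only if non-strict */
    if (pkt->dts < min_dts) {
        if (pkt->pts != AV_NOPTS_VALUE && pkt->pts >= pkt->dts)
            pkt->pts = FFMAX(pkt->pts, min_dts);
        pkt->dts = min_dts;
    }
    *last_mux_dts = pkt->dts;
}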
724 
725 static void close_output_stream(OutputStream *ost)
726 {
727  OutputFile *of = output_files[ost->file_index];
728 
729  ost->finished |= ENCODER_FINISHED;
730  if (of->shortest) {
731  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
732  of->recording_time = FFMIN(of->recording_time, end);
733  }
734 }
735 
736 static int check_recording_time(OutputStream *ost)
737 {
738  OutputFile *of = output_files[ost->file_index];
739 
740  if (of->recording_time != INT64_MAX &&
741  av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
742  AV_TIME_BASE_Q) >= 0) {
743  close_output_stream(ost);
744  return 0;
745  }
746  return 1;
747 }
748 
749 static void do_audio_out(AVFormatContext *s, OutputStream *ost,
750  AVFrame *frame)
751 {
752  AVCodecContext *enc = ost->enc_ctx;
753  AVPacket pkt;
754  int got_packet = 0;
755 
756  av_init_packet(&pkt);
757  pkt.data = NULL;
758  pkt.size = 0;
759 
760  if (!check_recording_time(ost))
761  return;
762 
763  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
764  frame->pts = ost->sync_opts;
765  ost->sync_opts = frame->pts + frame->nb_samples;
766  ost->samples_encoded += frame->nb_samples;
767  ost->frames_encoded++;
768 
769  av_assert0(pkt.size || !pkt.data);
771  if (debug_ts) {
772  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
773  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
774  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
775  enc->time_base.num, enc->time_base.den);
776  }
777 
778  if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
779  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed (avcodec_encode_audio2)\n");
780  exit_program(1);
781  }
782  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
783 
784  if (got_packet) {
785  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
786 
787  if (debug_ts) {
788  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
789  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
790  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
791  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
792  }
793 
794  write_frame(s, &pkt, ost);
795  }
796 }
797 
798 static void do_subtitle_out(AVFormatContext *s,
799  OutputStream *ost,
800  InputStream *ist,
801  AVSubtitle *sub)
802 {
803  int subtitle_out_max_size = 1024 * 1024;
804  int subtitle_out_size, nb, i;
805  AVCodecContext *enc;
806  AVPacket pkt;
807  int64_t pts;
808 
809  if (sub->pts == AV_NOPTS_VALUE) {
810  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
811  if (exit_on_error)
812  exit_program(1);
813  return;
814  }
815 
816  enc = ost->enc_ctx;
817 
818  if (!subtitle_out) {
819  subtitle_out = av_malloc(subtitle_out_max_size);
820  if (!subtitle_out) {
821  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
822  exit_program(1);
823  }
824  }
825 
826  /* Note: DVB subtitles need one packet to draw them and another
827  packet to clear them */
828  /* XXX: signal it in the codec context ? */
829  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
830  nb = 2;
831  else
832  nb = 1;
833 
834  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
835  pts = sub->pts;
836  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
837  pts -= output_files[ost->file_index]->start_time;
838  for (i = 0; i < nb; i++) {
839  unsigned save_num_rects = sub->num_rects;
840 
841  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
842  if (!check_recording_time(ost))
843  return;
844 
845  sub->pts = pts;
846  // start_display_time is required to be 0
847  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
849  sub->start_display_time = 0;
850  if (i == 1)
851  sub->num_rects = 0;
852 
853  ost->frames_encoded++;
854 
855  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
856  subtitle_out_max_size, sub);
857  if (i == 1)
858  sub->num_rects = save_num_rects;
859  if (subtitle_out_size < 0) {
860  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
861  exit_program(1);
862  }
863 
864  av_init_packet(&pkt);
865  pkt.data = subtitle_out;
866  pkt.size = subtitle_out_size;
867  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
868  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->st->time_base);
869  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
870  /* XXX: the pts correction is handled here. Maybe handling
871  it in the codec would be better */
872  if (i == 0)
873  pkt.pts += 90 * sub->start_display_time;
874  else
875  pkt.pts += 90 * sub->end_display_time;
876  }
877  pkt.dts = pkt.pts;
878  write_frame(s, &pkt, ost);
879  }
880 }
881 
882 static void do_video_out(AVFormatContext *s,
883  OutputStream *ost,
884  AVFrame *next_picture,
885  double sync_ipts)
886 {
887  int ret, format_video_sync;
888  AVPacket pkt;
889  AVCodecContext *enc = ost->enc_ctx;
890  AVCodecContext *mux_enc = ost->st->codec;
891  int nb_frames, nb0_frames, i;
892  double delta, delta0;
893  double duration = 0;
894  int frame_size = 0;
895  InputStream *ist = NULL;
896  AVFilterContext *filter = ost->filter->filter;
897 
898  if (ost->source_index >= 0)
899  ist = input_streams[ost->source_index];
900 
901  if (filter->inputs[0]->frame_rate.num > 0 &&
902  filter->inputs[0]->frame_rate.den > 0)
903  duration = 1/(av_q2d(filter->inputs[0]->frame_rate) * av_q2d(enc->time_base));
904 
905  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
906  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
907 
908  if (!ost->filters_script &&
909  !ost->filters &&
910  next_picture &&
911  ist &&
912  lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
913  duration = lrintf(av_frame_get_pkt_duration(next_picture) * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
914  }
915 
916  delta0 = sync_ipts - ost->sync_opts;
917  delta = delta0 + duration;
918 
919  /* by default, we output a single frame */
920  nb0_frames = 0;
921  nb_frames = 1;
922 
923  format_video_sync = video_sync_method;
924  if (format_video_sync == VSYNC_AUTO) {
925  if(!strcmp(s->oformat->name, "avi")) {
926  format_video_sync = VSYNC_VFR;
927  } else
928  format_video_sync = (s->oformat->flags & AVFMT_VARIABLE_FPS) ? ((s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
929  if ( ist
930  && format_video_sync == VSYNC_CFR
931  && input_files[ist->file_index]->ctx->nb_streams == 1
932  && input_files[ist->file_index]->input_ts_offset == 0) {
933  format_video_sync = VSYNC_VSCFR;
934  }
935  if (format_video_sync == VSYNC_CFR && copy_ts) {
936  format_video_sync = VSYNC_VSCFR;
937  }
938  }
939 
940  if (delta0 < 0 &&
941  delta > 0 &&
942  format_video_sync != VSYNC_PASSTHROUGH &&
943  format_video_sync != VSYNC_DROP) {
944  double cor = FFMIN(-delta0, duration);
945  if (delta0 < -0.6) {
946  av_log(NULL, AV_LOG_WARNING, "Past duration %f too large\n", -delta0);
947  } else
948  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
949  sync_ipts += cor;
950  duration -= cor;
951  delta0 += cor;
952  }
953 
954  switch (format_video_sync) {
955  case VSYNC_VSCFR:
956  if (ost->frame_number == 0 && delta - duration >= 0.5) {
957  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta - duration));
958  delta = duration;
959  delta0 = 0;
960  ost->sync_opts = lrint(sync_ipts);
961  }
962  case VSYNC_CFR:
963  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
964  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
965  nb_frames = 0;
966  } else if (delta < -1.1)
967  nb_frames = 0;
968  else if (delta > 1.1) {
969  nb_frames = lrintf(delta);
970  if (delta0 > 1.1)
971  nb0_frames = lrintf(delta0 - 0.6);
972  }
973  break;
974  case VSYNC_VFR:
975  if (delta <= -0.6)
976  nb_frames = 0;
977  else if (delta > 0.6)
978  ost->sync_opts = lrint(sync_ipts);
979  break;
980  case VSYNC_DROP:
981  case VSYNC_PASSTHROUGH:
982  ost->sync_opts = lrint(sync_ipts);
983  break;
984  default:
985  av_assert0(0);
986  }
987 
988  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
989  nb0_frames = FFMIN(nb0_frames, nb_frames);
990  if (nb0_frames == 0 && ost->last_droped) {
991  nb_frames_drop++;
993  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
994  ost->frame_number, ost->st->index, ost->last_frame->pts);
995  }
996  if (nb_frames > (nb0_frames && ost->last_droped) + (nb_frames > nb0_frames)) {
997  if (nb_frames > dts_error_threshold * 30) {
998  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
999  nb_frames_drop++;
1000  return;
1001  }
1002  nb_frames_dup += nb_frames - (nb0_frames && ost->last_droped) - (nb_frames > nb0_frames);
1003  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1004  }
1005  ost->last_droped = nb_frames == nb0_frames;
1006 
1007  /* duplicates frame if needed */
1008  for (i = 0; i < nb_frames; i++) {
1009  AVFrame *in_picture;
1010  av_init_packet(&pkt);
1011  pkt.data = NULL;
1012  pkt.size = 0;
1013 
1014  if (i < nb0_frames && ost->last_frame) {
1015  in_picture = ost->last_frame;
1016  } else
1017  in_picture = next_picture;
1018 
1019  in_picture->pts = ost->sync_opts;
1020 
1021 #if 1
1022  if (!check_recording_time(ost))
1023 #else
1024  if (ost->frame_number >= ost->max_frames)
1025 #endif
1026  return;
1027 
1028  if (s->oformat->flags & AVFMT_RAWPICTURE &&
1029  enc->codec->id == AV_CODEC_ID_RAWVIDEO) {
1030  /* raw pictures are written as AVPicture structure to
1031  avoid any copies. We temporarily support the older
1032  method. */
1033  if (in_picture->interlaced_frame)
1034  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1035  else
1036  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1037  pkt.data = (uint8_t *)in_picture;
1038  pkt.size = sizeof(AVPicture);
1039  pkt.pts = av_rescale_q(in_picture->pts, enc->time_base, ost->st->time_base);
1040  pkt.flags |= AV_PKT_FLAG_KEY;
1041 
1042  write_frame(s, &pkt, ost);
1043  } else {
1044  int got_packet, forced_keyframe = 0;
1045  double pts_time;
1046 
1048  ost->top_field_first >= 0)
1049  in_picture->top_field_first = !!ost->top_field_first;
1050 
1051  if (in_picture->interlaced_frame) {
1052  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1053  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1054  else
1055  mux_enc->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1056  } else
1057  mux_enc->field_order = AV_FIELD_PROGRESSIVE;
1058 
1059  in_picture->quality = enc->global_quality;
1060  in_picture->pict_type = 0;
1061 
1062  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1063  in_picture->pts * av_q2d(enc->time_base) : NAN;
1064  if (ost->forced_kf_index < ost->forced_kf_count &&
1065  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1066  ost->forced_kf_index++;
1067  forced_keyframe = 1;
1068  } else if (ost->forced_keyframes_pexpr) {
1069  double res;
1070  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1073  av_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1079  res);
1080  if (res) {
1081  forced_keyframe = 1;
1087  }
1088 
1090  }
1091 
1092  if (forced_keyframe) {
1093  in_picture->pict_type = AV_PICTURE_TYPE_I;
1094  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1095  }
1096 
1098  if (debug_ts) {
1099  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1100  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1101  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1102  enc->time_base.num, enc->time_base.den);
1103  }
1104 
1105  ost->frames_encoded++;
1106 
1107  ret = avcodec_encode_video2(enc, &pkt, in_picture, &got_packet);
1108  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1109  if (ret < 0) {
1110  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1111  exit_program(1);
1112  }
1113 
1114  if (got_packet) {
1115  if (debug_ts) {
1116  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1117  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1118  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1119  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1120  }
1121 
1122  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & CODEC_CAP_DELAY))
1123  pkt.pts = ost->sync_opts;
1124 
1125  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1126 
1127  if (debug_ts) {
1128  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1129  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1130  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->st->time_base),
1131  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->st->time_base));
1132  }
1133 
1134  frame_size = pkt.size;
1135  write_frame(s, &pkt, ost);
1136 
1137  /* if two pass, output log */
1138  if (ost->logfile && enc->stats_out) {
1139  fprintf(ost->logfile, "%s", enc->stats_out);
1140  }
1141  }
1142  }
1143  ost->sync_opts++;
1144  /*
1145  * For video, number of frames in == number of packets out.
1146  * But there may be reordering, so we can't throw away frames on encoder
1147  * flush, we need to limit them here, before they go into encoder.
1148  */
1149  ost->frame_number++;
1150 
1151  if (vstats_filename && frame_size)
1152  do_video_stats(ost, frame_size);
1153  }
1154 
1155  if (!ost->last_frame)
1156  ost->last_frame = av_frame_alloc();
1157  av_frame_unref(ost->last_frame);
1158  av_frame_ref(ost->last_frame, next_picture);
1159 }
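The CFR branch of the vsync logic above boils down to a simple rule on delta; a reduced sketch (ignoring frame_drop_threshold and the nb0_frames handling, purely illustrative) would be:

#include <math.h>

/* delta = (ideal output position of this frame) - (frames emitted so far) + duration.
 * Return how many copies of the frame to emit under CFR: 0 drops it,
 * 1 is the normal case, >1 duplicates it to fill a gap. */
static int cfr_frame_count(double delta)
{
    if (delta < -1.1)
        return 0;
    if (delta > 1.1)
        return (int)lrint(delta);
    return 1;
}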
1160 
1161 static double psnr(double d)
1162 {
1163  return -10.0 * log(d) / log(10.0);
1164 }
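A quick worked example of the psnr() helper above (the numbers are made up, not taken from a real encode):

#include <math.h>
#include <stdio.h>

int main(void)
{
    double error = 500000.0;                       /* hypothetical summed squared error */
    double scale = 640.0 * 480.0 * 255.0 * 255.0;  /* pixels * max_sample_value^2 */
    double d     = error / scale;                  /* normalized error, as used above */
    printf("PSNR = %.2f dB\n", -10.0 * log(d) / log(10.0));   /* about 46 dB */
    return 0;
}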
1165 
1166 static void do_video_stats(OutputStream *ost, int frame_size)
1167 {
1168  AVCodecContext *enc;
1169  int frame_number;
1170  double ti1, bitrate, avg_bitrate;
1171 
1172  /* this is executed just the first time do_video_stats is called */
1173  if (!vstats_file) {
1174  vstats_file = fopen(vstats_filename, "w");
1175  if (!vstats_file) {
1176  perror("fopen");
1177  exit_program(1);
1178  }
1179  }
1180 
1181  enc = ost->enc_ctx;
1182  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1183  frame_number = ost->st->nb_frames;
1184  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame ? enc->coded_frame->quality / (float)FF_QP2LAMBDA : 0);
1185  if (enc->coded_frame && (enc->flags&CODEC_FLAG_PSNR))
1186  fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1187 
1188  fprintf(vstats_file,"f_size= %6d ", frame_size);
1189  /* compute pts value */
1190  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1191  if (ti1 < 0.01)
1192  ti1 = 0.01;
1193 
1194  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1195  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1196  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1197  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1198  fprintf(vstats_file, "type= %c\n", enc->coded_frame ? av_get_picture_type_char(enc->coded_frame->pict_type) : 'I');
1199  }
1200 }
1201 
1202 static void finish_output_stream(OutputStream *ost)
1203 {
1204  OutputFile *of = output_files[ost->file_index];
1205  int i;
1206 
1207  ost->finished = ENCODER_FINISHED | MUXER_FINISHED;
1208 
1209  if (of->shortest) {
1210  for (i = 0; i < of->ctx->nb_streams; i++)
1211  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1212  }
1213 }
1214 
1215 /**
1216  * Get and encode new output from any of the filtergraphs, without causing
1217  * activity.
1218  *
1219  * @return 0 for success, <0 for severe errors
1220  */
1221 static int reap_filters(void)
1222 {
1223  AVFrame *filtered_frame = NULL;
1224  int i;
1225 
1226  /* Reap all buffers present in the buffer sinks */
1227  for (i = 0; i < nb_output_streams; i++) {
1228  OutputStream *ost = output_streams[i];
1229  OutputFile *of = output_files[ost->file_index];
1230  AVFilterContext *filter;
1231  AVCodecContext *enc = ost->enc_ctx;
1232  int ret = 0;
1233 
1234  if (!ost->filter)
1235  continue;
1236  filter = ost->filter->filter;
1237 
1238  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1239  return AVERROR(ENOMEM);
1240  }
1241  filtered_frame = ost->filtered_frame;
1242 
1243  while (1) {
1244  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1245  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1246  AV_BUFFERSINK_FLAG_NO_REQUEST);
1247  if (ret < 0) {
1248  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1249  av_log(NULL, AV_LOG_WARNING,
1250  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1251  }
1252  break;
1253  }
1254  if (ost->finished) {
1255  av_frame_unref(filtered_frame);
1256  continue;
1257  }
1258  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1259  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1260  AVRational tb = enc->time_base;
1261  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1262 
1263  tb.den <<= extra_bits;
1264  float_pts =
1265  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, tb) -
1266  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1267  float_pts /= 1 << extra_bits;
1268  // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1269  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1270 
1271  filtered_frame->pts =
1272  av_rescale_q(filtered_frame->pts, filter->inputs[0]->time_base, enc->time_base) -
1273  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1274  }
1275  //if (ost->source_index >= 0)
1276  // *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold
1277 
1278  switch (filter->inputs[0]->type) {
1279  case AVMEDIA_TYPE_VIDEO:
1280  if (!ost->frame_aspect_ratio.num)
1281  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1282 
1283  if (debug_ts) {
1284  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1285  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1286  float_pts,
1287  enc->time_base.num, enc->time_base.den);
1288  }
1289 
1290  do_video_out(of->ctx, ost, filtered_frame, float_pts);
1291  break;
1292  case AVMEDIA_TYPE_AUDIO:
1293  if (!(enc->codec->capabilities & CODEC_CAP_PARAM_CHANGE) &&
1294  enc->channels != av_frame_get_channels(filtered_frame)) {
1295  av_log(NULL, AV_LOG_ERROR,
1296  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1297  break;
1298  }
1299  do_audio_out(of->ctx, ost, filtered_frame);
1300  break;
1301  default:
1302  // TODO support subtitle filters
1303  av_assert0(0);
1304  }
1305 
1306  av_frame_unref(filtered_frame);
1307  }
1308  }
1309 
1310  return 0;
1311 }
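The buffersink polling pattern used above, stripped of the ffmpeg-specific bookkeeping, is roughly the following sketch ("sink" is assumed to be an already-configured buffersink filter instance):

#include <errno.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

static int drain_sink(AVFilterContext *sink)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    /* Pull whatever is already queued without forcing the graph to run. */
    while ((ret = av_buffersink_get_frame_flags(sink, frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST)) >= 0) {
        /* ... hand the frame to the encoder here ... */
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}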
1312 
1313 static void print_final_stats(int64_t total_size)
1314 {
1315  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1316  uint64_t subtitle_size = 0;
1317  uint64_t data_size = 0;
1318  float percent = -1.0;
1319  int i, j;
1320 
1321  for (i = 0; i < nb_output_streams; i++) {
1322  OutputStream *ost = output_streams[i];
1323  switch (ost->enc_ctx->codec_type) {
1324  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1325  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1326  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1327  default: other_size += ost->data_size; break;
1328  }
1329  extra_size += ost->enc_ctx->extradata_size;
1330  data_size += ost->data_size;
1331  }
1332 
1333  if (data_size && total_size>0 && total_size >= data_size)
1334  percent = 100.0 * (total_size - data_size) / data_size;
1335 
1336  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1337  video_size / 1024.0,
1338  audio_size / 1024.0,
1339  subtitle_size / 1024.0,
1340  other_size / 1024.0,
1341  extra_size / 1024.0);
1342  if (percent >= 0.0)
1343  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1344  else
1345  av_log(NULL, AV_LOG_INFO, "unknown");
1346  av_log(NULL, AV_LOG_INFO, "\n");
1347 
1348  /* print verbose per-stream stats */
1349  for (i = 0; i < nb_input_files; i++) {
1350  InputFile *f = input_files[i];
1351  uint64_t total_packets = 0, total_size = 0;
1352 
1353  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1354  i, f->ctx->filename);
1355 
1356  for (j = 0; j < f->nb_streams; j++) {
1357  InputStream *ist = input_streams[f->ist_index + j];
1358  enum AVMediaType type = ist->dec_ctx->codec_type;
1359 
1360  total_size += ist->data_size;
1361  total_packets += ist->nb_packets;
1362 
1363  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1364  i, j, media_type_string(type));
1365  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1366  ist->nb_packets, ist->data_size);
1367 
1368  if (ist->decoding_needed) {
1369  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1370  ist->frames_decoded);
1371  if (type == AVMEDIA_TYPE_AUDIO)
1372  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1373  av_log(NULL, AV_LOG_VERBOSE, "; ");
1374  }
1375 
1376  av_log(NULL, AV_LOG_VERBOSE, "\n");
1377  }
1378 
1379  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1380  total_packets, total_size);
1381  }
1382 
1383  for (i = 0; i < nb_output_files; i++) {
1384  OutputFile *of = output_files[i];
1385  uint64_t total_packets = 0, total_size = 0;
1386 
1387  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1388  i, of->ctx->filename);
1389 
1390  for (j = 0; j < of->ctx->nb_streams; j++) {
1391  OutputStream *ost = output_streams[of->ost_index + j];
1392  enum AVMediaType type = ost->enc_ctx->codec_type;
1393 
1394  total_size += ost->data_size;
1395  total_packets += ost->packets_written;
1396 
1397  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1398  i, j, media_type_string(type));
1399  if (ost->encoding_needed) {
1400  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1401  ost->frames_encoded);
1402  if (type == AVMEDIA_TYPE_AUDIO)
1403  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1404  av_log(NULL, AV_LOG_VERBOSE, "; ");
1405  }
1406 
1407  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1408  ost->packets_written, ost->data_size);
1409 
1410  av_log(NULL, AV_LOG_VERBOSE, "\n");
1411  }
1412 
1413  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1414  total_packets, total_size);
1415  }
1416  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1417  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded (check -ss / -t / -frames parameters if used)\n");
1418  }
1419 }
1420 
1421 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1422 {
1423  char buf[1024];
1424  AVBPrint buf_script;
1425  OutputStream *ost;
1426  AVFormatContext *oc;
1427  int64_t total_size;
1428  AVCodecContext *enc;
1429  int frame_number, vid, i;
1430  double bitrate;
1431  int64_t pts = INT64_MIN;
1432  static int64_t last_time = -1;
1433  static int qp_histogram[52];
1434  int hours, mins, secs, us;
1435 
1436  if (!print_stats && !is_last_report && !progress_avio)
1437  return;
1438 
1439  if (!is_last_report) {
1440  if (last_time == -1) {
1441  last_time = cur_time;
1442  return;
1443  }
1444  if ((cur_time - last_time) < 500000)
1445  return;
1446  last_time = cur_time;
1447  }
1448 
1449 
1450  oc = output_files[0]->ctx;
1451 
1452  total_size = avio_size(oc->pb);
1453  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1454  total_size = avio_tell(oc->pb);
1455 
1456  buf[0] = '\0';
1457  vid = 0;
1458  av_bprint_init(&buf_script, 0, 1);
1459  for (i = 0; i < nb_output_streams; i++) {
1460  float q = -1;
1461  ost = output_streams[i];
1462  enc = ost->enc_ctx;
1463  if (!ost->stream_copy && enc->coded_frame)
1464  q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
1465  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1466  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
1467  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1468  ost->file_index, ost->index, q);
1469  }
1470  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1471  float fps, t = (cur_time-timer_start) / 1000000.0;
1472 
1473  frame_number = ost->frame_number;
1474  fps = t > 1 ? frame_number / t : 0;
1475  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3.*f q=%3.1f ",
1476  frame_number, fps < 9.95, fps, q);
1477  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1478  av_bprintf(&buf_script, "fps=%.1f\n", fps);
1479  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1480  ost->file_index, ost->index, q);
1481  if (is_last_report)
1482  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
1483  if (qp_hist) {
1484  int j;
1485  int qp = lrintf(q);
1486  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1487  qp_histogram[qp]++;
1488  for (j = 0; j < 32; j++)
1489  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log2(qp_histogram[j] + 1)));
1490  }
1491  if ((enc->flags&CODEC_FLAG_PSNR) && (enc->coded_frame || is_last_report)) {
1492  int j;
1493  double error, error_sum = 0;
1494  double scale, scale_sum = 0;
1495  double p;
1496  char type[3] = { 'Y','U','V' };
1497  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
1498  for (j = 0; j < 3; j++) {
1499  if (is_last_report) {
1500  error = enc->error[j];
1501  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1502  } else {
1503  error = enc->coded_frame->error[j];
1504  scale = enc->width * enc->height * 255.0 * 255.0;
1505  }
1506  if (j)
1507  scale /= 4;
1508  error_sum += error;
1509  scale_sum += scale;
1510  p = psnr(error / scale);
1511  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], p);
1512  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1513  ost->file_index, ost->index, type[j] | 32, p);
1514  }
1515  p = psnr(error_sum / scale_sum);
1516  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
1517  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1518  ost->file_index, ost->index, p);
1519  }
1520  vid = 1;
1521  }
1522  /* compute min output value */
1524  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1525  ost->st->time_base, AV_TIME_BASE_Q));
1526  if (is_last_report)
1527  nb_frames_drop += ost->last_droped;
1528  }
1529 
1530  secs = FFABS(pts) / AV_TIME_BASE;
1531  us = FFABS(pts) % AV_TIME_BASE;
1532  mins = secs / 60;
1533  secs %= 60;
1534  hours = mins / 60;
1535  mins %= 60;
1536 
1537  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1538 
1539  if (total_size < 0) snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1540  "size=N/A time=");
1541  else snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1542  "size=%8.0fkB time=", total_size / 1024.0);
1543  if (pts < 0)
1544  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "-");
1545  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
1546  "%02d:%02d:%02d.%02d ", hours, mins, secs,
1547  (100 * us) / AV_TIME_BASE);
1548 
1549  if (bitrate < 0) {
1550  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=N/A");
1551  av_bprintf(&buf_script, "bitrate=N/A\n");
1552  }else{
1553  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),"bitrate=%6.1fkbits/s", bitrate);
1554  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1555  }
1556 
1557  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1558  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1559  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1560  av_bprintf(&buf_script, "out_time=%02d:%02d:%02d.%06d\n",
1561  hours, mins, secs, us);
1562 
1563  if (nb_frames_dup || nb_frames_drop)
1564  snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
1565  nb_frames_dup, nb_frames_drop);
1566  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1567  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1568 
1569  if (print_stats || is_last_report) {
1570  const char end = is_last_report ? '\n' : '\r';
1571  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1572  fprintf(stderr, "%s %c", buf, end);
1573  } else
1574  av_log(NULL, AV_LOG_INFO, "%s %c", buf, end);
1575 
1576  fflush(stderr);
1577  }
1578 
1579  if (progress_avio) {
1580  av_bprintf(&buf_script, "progress=%s\n",
1581  is_last_report ? "end" : "continue");
1582  avio_write(progress_avio, buf_script.str,
1583  FFMIN(buf_script.len, buf_script.size - 1));
1584  avio_flush(progress_avio);
1585  av_bprint_finalize(&buf_script, NULL);
1586  if (is_last_report) {
1587  avio_closep(&progress_avio);
1588  }
1589  }
1590 
1591  if (is_last_report)
1592  print_final_stats(total_size);
1593 }
1594 
1595 static void flush_encoders(void)
1596 {
1597  int i, ret;
1598 
1599  for (i = 0; i < nb_output_streams; i++) {
1600  OutputStream *ost = output_streams[i];
1601  AVCodecContext *enc = ost->enc_ctx;
1602  AVFormatContext *os = output_files[ost->file_index]->ctx;
1603  int stop_encoding = 0;
1604 
1605  if (!ost->encoding_needed)
1606  continue;
1607 
1608  if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
1609  continue;
1611  continue;
1612 
1613  for (;;) {
1614  int (*encode)(AVCodecContext*, AVPacket*, const AVFrame*, int*) = NULL;
1615  const char *desc;
1616 
1617  switch (enc->codec_type) {
1618  case AVMEDIA_TYPE_AUDIO:
1619  encode = avcodec_encode_audio2;
1620  desc = "Audio";
1621  break;
1622  case AVMEDIA_TYPE_VIDEO:
1623  encode = avcodec_encode_video2;
1624  desc = "Video";
1625  break;
1626  default:
1627  stop_encoding = 1;
1628  }
1629 
1630  if (encode) {
1631  AVPacket pkt;
1632  int pkt_size;
1633  int got_packet;
1634  av_init_packet(&pkt);
1635  pkt.data = NULL;
1636  pkt.size = 0;
1637 
1639  ret = encode(enc, &pkt, NULL, &got_packet);
1640  update_benchmark("flush %s %d.%d", desc, ost->file_index, ost->index);
1641  if (ret < 0) {
1642  av_log(NULL, AV_LOG_FATAL, "%s encoding failed\n", desc);
1643  exit_program(1);
1644  }
1645  if (ost->logfile && enc->stats_out) {
1646  fprintf(ost->logfile, "%s", enc->stats_out);
1647  }
1648  if (!got_packet) {
1649  stop_encoding = 1;
1650  break;
1651  }
1652  if (ost->finished & MUXER_FINISHED) {
1653  av_free_packet(&pkt);
1654  continue;
1655  }
1656  av_packet_rescale_ts(&pkt, enc->time_base, ost->st->time_base);
1657  pkt_size = pkt.size;
1658  write_frame(os, &pkt, ost);
1660  do_video_stats(ost, pkt_size);
1661  }
1662  }
1663 
1664  if (stop_encoding)
1665  break;
1666  }
1667  }
1668 }
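A hedged sketch of the delayed-frame flush performed above: once there is no more input, feed NULL frames to the encoder until it stops returning packets. It uses the same pre-send/receive API (avcodec_encode_video2) as this version of ffmpeg.c; the helper itself is illustrative, not part of the file:

#include <libavformat/avformat.h>

static int flush_video_encoder(AVCodecContext *enc, AVFormatContext *mux, AVStream *st)
{
    for (;;) {
        AVPacket pkt;
        int got_packet, ret;

        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
        if (ret < 0)
            return ret;
        if (!got_packet)
            return 0;                             /* encoder fully drained */

        av_packet_rescale_ts(&pkt, enc->time_base, st->time_base);
        pkt.stream_index = st->index;
        ret = av_interleaved_write_frame(mux, &pkt);
        if (ret < 0)
            return ret;
    }
}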
1669 
1670 /*
1671  * Check whether a packet from ist should be written into ost at this time
1672  */
1673 static int check_output_constraints(InputStream *ist, OutputStream *ost)
1674 {
1675  OutputFile *of = output_files[ost->file_index];
1676  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1677 
1678  if (ost->source_index != ist_index)
1679  return 0;
1680 
1681  if (ost->finished)
1682  return 0;
1683 
1684  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1685  return 0;
1686 
1687  return 1;
1688 }
1689 
1690 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1691 {
1692  OutputFile *of = output_files[ost->file_index];
1693  InputFile *f = input_files [ist->file_index];
1694  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1695  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->st->time_base);
1696  int64_t ist_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ist->st->time_base);
1697  AVPicture pict;
1698  AVPacket opkt;
1699 
1700  av_init_packet(&opkt);
1701 
1702  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
1703  !ost->copy_initial_nonkeyframes)
1704  return;
1705 
1706  if (pkt->pts == AV_NOPTS_VALUE) {
1707  if (!ost->frame_number && ist->pts < start_time &&
1708  !ost->copy_prior_start)
1709  return;
1710  } else {
1711  if (!ost->frame_number && pkt->pts < ist_tb_start_time &&
1712  !ost->copy_prior_start)
1713  return;
1714  }
1715 
1716  if (of->recording_time != INT64_MAX &&
1717  ist->pts >= of->recording_time + start_time) {
1718  close_output_stream(ost);
1719  return;
1720  }
1721 
1722  if (f->recording_time != INT64_MAX) {
1723  start_time = f->ctx->start_time;
1724  if (f->start_time != AV_NOPTS_VALUE)
1725  start_time += f->start_time;
1726  if (ist->pts >= f->recording_time + start_time) {
1727  close_output_stream(ost);
1728  return;
1729  }
1730  }
1731 
1732  /* force the input stream PTS */
1733  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
1734  ost->sync_opts++;
1735 
1736  if (pkt->pts != AV_NOPTS_VALUE)
1737  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
1738  else
1739  opkt.pts = AV_NOPTS_VALUE;
1740 
1741  if (pkt->dts == AV_NOPTS_VALUE)
1742  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->st->time_base);
1743  else
1744  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
1745  opkt.dts -= ost_tb_start_time;
1746 
1747  if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
1748  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
1749  if(!duration)
1750  duration = ist->dec_ctx->frame_size;
1751  opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
1752  (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
1753  ost->st->time_base) - ost_tb_start_time;
1754  }
1755 
1756  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
1757  opkt.flags = pkt->flags;
1758 
1759  // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
1760  if ( ost->enc_ctx->codec_id != AV_CODEC_ID_H264
1761  && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG1VIDEO
1762  && ost->enc_ctx->codec_id != AV_CODEC_ID_MPEG2VIDEO
1763  && ost->enc_ctx->codec_id != AV_CODEC_ID_VC1
1764  ) {
1765  if (av_parser_change(ost->parser, ost->st->codec,
1766  &opkt.data, &opkt.size,
1767  pkt->data, pkt->size,
1768  pkt->flags & AV_PKT_FLAG_KEY)) {
1769  opkt.buf = av_buffer_create(opkt.data, opkt.size, av_buffer_default_free, NULL, 0);
1770  if (!opkt.buf)
1771  exit_program(1);
1772  }
1773  } else {
1774  opkt.data = pkt->data;
1775  opkt.size = pkt->size;
1776  }
1777  av_copy_packet_side_data(&opkt, pkt);
1778 
1779  if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (of->ctx->oformat->flags & AVFMT_RAWPICTURE)) {
1780  /* store AVPicture in AVPacket, as expected by the output format */
1781  avpicture_fill(&pict, opkt.data, ost->st->codec->pix_fmt, ost->st->codec->width, ost->st->codec->height);
1782  opkt.data = (uint8_t *)&pict;
1783  opkt.size = sizeof(AVPicture);
1784  opkt.flags |= AV_PKT_FLAG_KEY;
1785  }
1786 
1787  write_frame(of->ctx, &opkt, ost);
1788 }
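A hedged sketch of the timestamp handling in do_streamcopy(): copied packets keep their payload, but pts/dts/duration must be rebased from the input stream's time base to the output stream's, minus the -ss offset (ost_tb_start_time above). The helper name and signature are illustrative only:

#include <libavformat/avformat.h>

static void rebase_copied_packet(AVPacket *opkt, const AVPacket *pkt,
                                 AVRational in_tb, AVRational out_tb,
                                 int64_t out_tb_start_time)
{
    opkt->pts = pkt->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                av_rescale_q(pkt->pts, in_tb, out_tb) - out_tb_start_time;
    opkt->dts = pkt->dts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                av_rescale_q(pkt->dts, in_tb, out_tb) - out_tb_start_time;
    opkt->duration = av_rescale_q(pkt->duration, in_tb, out_tb);
    opkt->flags    = pkt->flags;
}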
1789 
1790 static int guess_input_channel_layout(InputStream *ist)
1791 {
1792  AVCodecContext *dec = ist->dec_ctx;
1793 
1794  if (!dec->channel_layout) {
1795  char layout_name[256];
1796 
1797  if (dec->channels > ist->guess_layout_max)
1798  return 0;
1799  dec->channel_layout = av_get_default_channel_layout(dec->channels);
1800  if (!dec->channel_layout)
1801  return 0;
1802  av_get_channel_layout_string(layout_name, sizeof(layout_name),
1803  dec->channels, dec->channel_layout);
1804  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
1805  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
1806  }
1807  return 1;
1808 }
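A small usage sketch of the fallback performed above: when the decoder reports no channel layout, libavutil's default mapping for the channel count is used (illustrative standalone program, not part of ffmpeg.c):

#include <libavutil/channel_layout.h>
#include <stdio.h>

int main(void)
{
    char name[128];
    int64_t layout = av_get_default_channel_layout(2);    /* 2 channels -> stereo */
    av_get_channel_layout_string(name, sizeof(name), 2, layout);
    printf("guessed layout: %s\n", name);                  /* prints "stereo" */
    return 0;
}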
1809 
1810 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
1811 {
1812  AVFrame *decoded_frame, *f;
1813  AVCodecContext *avctx = ist->dec_ctx;
1814  int i, ret, err = 0, resample_changed;
1815  AVRational decoded_frame_tb;
1816 
1817  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1818  return AVERROR(ENOMEM);
1819  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1820  return AVERROR(ENOMEM);
1821  decoded_frame = ist->decoded_frame;
1822 
1824  ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
1825  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
1826 
1827  if (ret >= 0 && avctx->sample_rate <= 0) {
1828  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
1829  ret = AVERROR_INVALIDDATA;
1830  }
1831 
1832  if (*got_output || ret<0 || pkt->size)
1833  decode_error_stat[ret<0] ++;
1834 
1835  if (!*got_output || ret < 0) {
1836  if (!pkt->size) {
1837  for (i = 0; i < ist->nb_filters; i++)
1838 #if 1
1839  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1840 #else
1842 #endif
1843  }
1844  return ret;
1845  }
1846 
1847  ist->samples_decoded += decoded_frame->nb_samples;
1848  ist->frames_decoded++;
1849 
1850 #if 1
1851  /* increment next_dts to use for the case where the input stream does not
1852  have timestamps or there are multiple frames in the packet */
1853  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1854  avctx->sample_rate;
1855  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
1856  avctx->sample_rate;
1857 #endif
1858 
1859  resample_changed = ist->resample_sample_fmt != decoded_frame->format ||
1860  ist->resample_channels != avctx->channels ||
1861  ist->resample_channel_layout != decoded_frame->channel_layout ||
1862  ist->resample_sample_rate != decoded_frame->sample_rate;
1863  if (resample_changed) {
1864  char layout1[64], layout2[64];
1865 
1866  if (!guess_input_channel_layout(ist)) {
1867  av_log(NULL, AV_LOG_FATAL, "Unable to find default channel "
1868  "layout for Input Stream #%d.%d\n", ist->file_index,
1869  ist->st->index);
1870  exit_program(1);
1871  }
1872  decoded_frame->channel_layout = avctx->channel_layout;
1873 
1874  av_get_channel_layout_string(layout1, sizeof(layout1), ist->resample_channels,
1876  av_get_channel_layout_string(layout2, sizeof(layout2), avctx->channels,
1877  decoded_frame->channel_layout);
1878 
1880  "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d chl:%s to rate:%d fmt:%s ch:%d chl:%s\n",
1881  ist->file_index, ist->st->index,
1883  ist->resample_channels, layout1,
1884  decoded_frame->sample_rate, av_get_sample_fmt_name(decoded_frame->format),
1885  avctx->channels, layout2);
1886 
1887  ist->resample_sample_fmt = decoded_frame->format;
1888  ist->resample_sample_rate = decoded_frame->sample_rate;
1889  ist->resample_channel_layout = decoded_frame->channel_layout;
1890  ist->resample_channels = avctx->channels;
1891 
1892  for (i = 0; i < nb_filtergraphs; i++)
1893  if (ist_in_filtergraph(filtergraphs[i], ist)) {
1894  FilterGraph *fg = filtergraphs[i];
1895  if (configure_filtergraph(fg) < 0) {
1896  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
1897  exit_program(1);
1898  }
1899  }
1900  }
1901 
1902  /* if the decoder provides a pts, use it instead of the last packet pts.
1903  the decoder could be delaying output by a packet or more. */
1904  if (decoded_frame->pts != AV_NOPTS_VALUE) {
1905  ist->dts = ist->next_dts = ist->pts = ist->next_pts = av_rescale_q(decoded_frame->pts, avctx->time_base, AV_TIME_BASE_Q);
1906  decoded_frame_tb = avctx->time_base;
1907  } else if (decoded_frame->pkt_pts != AV_NOPTS_VALUE) {
1908  decoded_frame->pts = decoded_frame->pkt_pts;
1909  decoded_frame_tb = ist->st->time_base;
1910  } else if (pkt->pts != AV_NOPTS_VALUE) {
1911  decoded_frame->pts = pkt->pts;
1912  decoded_frame_tb = ist->st->time_base;
1913  }else {
1914  decoded_frame->pts = ist->dts;
1915  decoded_frame_tb = AV_TIME_BASE_Q;
1916  }
1917  pkt->pts = AV_NOPTS_VALUE;
1918  if (decoded_frame->pts != AV_NOPTS_VALUE)
1919  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
1920  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
1921  (AVRational){1, avctx->sample_rate});
1922  for (i = 0; i < ist->nb_filters; i++) {
1923  if (i < ist->nb_filters - 1) {
1924  f = ist->filter_frame;
1925  err = av_frame_ref(f, decoded_frame);
1926  if (err < 0)
1927  break;
1928  } else
1929  f = decoded_frame;
1930  err = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f,
1931  AV_BUFFERSRC_FLAG_PUSH);
1932  if (err == AVERROR_EOF)
1933  err = 0; /* ignore */
1934  if (err < 0)
1935  break;
1936  }
1937  decoded_frame->pts = AV_NOPTS_VALUE;
1938 
1939  av_frame_unref(ist->filter_frame);
1940  av_frame_unref(decoded_frame);
1941  return err < 0 ? err : ret;
1942 }
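/* A minimal sketch, not part of the original source, of the timestamp
 * bookkeeping decode_audio() performs above, assuming a decoded frame
 * "frame" and its codec context "avctx": the prediction for the next
 * frame advances by the frame duration expressed in AV_TIME_BASE units,
 *
 *     int64_t duration = ((int64_t)AV_TIME_BASE * frame->nb_samples) /
 *                        avctx->sample_rate;
 *     ist->next_pts += duration;
 *     ist->next_dts += duration;
 *
 * and the frame pts is then passed through av_rescale_delta() so that
 * sample counts, rather than rounded container timestamps, drive the
 * timeline handed to the filtergraph. */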
1943 
1944 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
1945 {
1946  AVFrame *decoded_frame, *f;
1947  int i, ret = 0, err = 0, resample_changed;
1948  int64_t best_effort_timestamp;
1949  AVRational *frame_sample_aspect;
1950 
1951  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
1952  return AVERROR(ENOMEM);
1953  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
1954  return AVERROR(ENOMEM);
1955  decoded_frame = ist->decoded_frame;
1956  pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
1957 
1958  update_benchmark(NULL);
1959  ret = avcodec_decode_video2(ist->dec_ctx,
1960  decoded_frame, got_output, pkt);
1961  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
1962 
1963  // The following line may be required in some cases where there is no parser
1964  // or the parser does not set has_b_frames correctly
1965  if (ist->st->codec->has_b_frames < ist->dec_ctx->has_b_frames) {
1966  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
1967  ist->st->codec->has_b_frames = ist->dec_ctx->has_b_frames;
1968  } else
1969  av_log_ask_for_sample(
1970  ist->dec_ctx,
1971  "has_b_frames is larger in decoder than demuxer %d > %d ",
1972  ist->dec_ctx->has_b_frames,
1973  ist->st->codec->has_b_frames
1974  );
1975  }
1976 
1977  if (*got_output || ret<0 || pkt->size)
1978  decode_error_stat[ret<0] ++;
1979 
1980  if (*got_output && ret >= 0) {
1981  if (ist->dec_ctx->width != decoded_frame->width ||
1982  ist->dec_ctx->height != decoded_frame->height ||
1983  ist->dec_ctx->pix_fmt != decoded_frame->format) {
1984  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
1985  decoded_frame->width,
1986  decoded_frame->height,
1987  decoded_frame->format,
1988  ist->dec_ctx->width,
1989  ist->dec_ctx->height,
1990  ist->dec_ctx->pix_fmt);
1991  }
1992  }
1993 
1994  if (!*got_output || ret < 0) {
1995  if (!pkt->size) {
1996  for (i = 0; i < ist->nb_filters; i++)
1997 #if 1
1998  av_buffersrc_add_ref(ist->filters[i]->filter, NULL, 0);
1999 #else
2000  av_buffersrc_buffer(ist->filters[i]->filter, NULL);
2001 #endif
2002  }
2003  return ret;
2004  }
2005 
2006  if(ist->top_field_first>=0)
2007  decoded_frame->top_field_first = ist->top_field_first;
2008 
2009  ist->frames_decoded++;
2010 
2011  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2012  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2013  if (err < 0)
2014  goto fail;
2015  }
2016  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
2017 
2018  best_effort_timestamp= av_frame_get_best_effort_timestamp(decoded_frame);
2019  if(best_effort_timestamp != AV_NOPTS_VALUE)
2020  ist->next_pts = ist->pts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2021 
2022  if (debug_ts) {
2023  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2024  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2025  ist->st->index, av_ts2str(decoded_frame->pts),
2026  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2027  best_effort_timestamp,
2028  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2029  decoded_frame->key_frame, decoded_frame->pict_type,
2030  ist->st->time_base.num, ist->st->time_base.den);
2031  }
2032 
2033  pkt->size = 0;
2034 
2035  if (ist->st->sample_aspect_ratio.num)
2036  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2037 
2038  resample_changed = ist->resample_width != decoded_frame->width ||
2039  ist->resample_height != decoded_frame->height ||
2040  ist->resample_pix_fmt != decoded_frame->format;
2041  if (resample_changed) {
2043  "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
2044  ist->file_index, ist->st->index,
2046  decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
2047 
2048  ist->resample_width = decoded_frame->width;
2049  ist->resample_height = decoded_frame->height;
2050  ist->resample_pix_fmt = decoded_frame->format;
2051 
2052  for (i = 0; i < nb_filtergraphs; i++) {
2053  if (ist_in_filtergraph(filtergraphs[i], ist) && ist->reinit_filters &&
2054  configure_filtergraph(filtergraphs[i]) < 0) {
2055  av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
2056  exit_program(1);
2057  }
2058  }
2059  }
2060 
2061  frame_sample_aspect= av_opt_ptr(avcodec_get_frame_class(), decoded_frame, "sample_aspect_ratio");
2062  for (i = 0; i < ist->nb_filters; i++) {
2063  if (!frame_sample_aspect->num)
2064  *frame_sample_aspect = ist->st->sample_aspect_ratio;
2065 
2066  if (i < ist->nb_filters - 1) {
2067  f = ist->filter_frame;
2068  err = av_frame_ref(f, decoded_frame);
2069  if (err < 0)
2070  break;
2071  } else
2072  f = decoded_frame;
2073  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, f, AV_BUFFERSRC_FLAG_PUSH);
2074  if (ret == AVERROR_EOF) {
2075  ret = 0; /* ignore */
2076  } else if (ret < 0) {
2077  av_log(NULL, AV_LOG_FATAL,
2078  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2079  exit_program(1);
2080  }
2081  }
2082 
2083 fail:
2084  av_frame_unref(ist->filter_frame);
2085  av_frame_unref(decoded_frame);
2086  return err < 0 ? err : ret;
2087 }
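/* A hedged sketch, not part of the original source, of the timestamp choice
 * decode_video() makes above: it trusts libavcodec's best-effort timestamp,
 * which is derived from the frame pts and the packet pts/dts heuristics,
 * and rescales it to microseconds ("frame" stands in for the decoded frame):
 *
 *     int64_t bet = av_frame_get_best_effort_timestamp(frame);
 *     if (bet != AV_NOPTS_VALUE)
 *         ist->pts = av_rescale_q(bet, ist->st->time_base, AV_TIME_BASE_Q);
 */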
2088 
2089 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
2090 {
2091  AVSubtitle subtitle;
2092  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2093  &subtitle, got_output, pkt);
2094 
2095  if (*got_output || ret<0 || pkt->size)
2096  decode_error_stat[ret<0] ++;
2097 
2098  if (ret < 0 || !*got_output) {
2099  if (!pkt->size)
2100  sub2video_flush(ist);
2101  return ret;
2102  }
2103 
2104  if (ist->fix_sub_duration) {
2105  int end = 1;
2106  if (ist->prev_sub.got_output) {
2107  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2108  1000, AV_TIME_BASE);
2109  if (end < ist->prev_sub.subtitle.end_display_time) {
2110  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2111  "Subtitle duration reduced from %d to %d%s\n",
2112  ist->prev_sub.subtitle.end_display_time, end,
2113  end <= 0 ? ", dropping it" : "");
2114  ist->prev_sub.subtitle.end_display_time = end;
2115  }
2116  }
2117  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2118  FFSWAP(int, ret, ist->prev_sub.ret);
2119  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2120  if (end <= 0)
2121  goto out;
2122  }
2123 
2124  if (!*got_output)
2125  return ret;
2126 
2127  sub2video_update(ist, &subtitle);
2128 
2129  if (!subtitle.num_rects)
2130  goto out;
2131 
2132  ist->frames_decoded++;
2133 
2134  for (i = 0; i < nb_output_streams; i++) {
2135  OutputStream *ost = output_streams[i];
2136 
2137  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2138  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2139  continue;
2140 
2141  do_subtitle_out(output_files[ost->file_index]->ctx, ost, ist, &subtitle);
2142  }
2143 
2144 out:
2145  avsubtitle_free(&subtitle);
2146  return ret;
2147 }
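/* An illustrative note, not part of the original source: with
 * -fix_sub_duration the previous subtitle is held back one event so that its
 * display time can be capped at the start of the next one. The cap computed
 * above is simply the inter-subtitle gap converted to milliseconds,
 *
 *     end_ms = (subtitle.pts - prev.pts) * 1000 / AV_TIME_BASE;
 *
 * and a non-positive value means the previous event is dropped entirely. */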
2148 
2149 /* pkt = NULL means EOF (needed to flush decoder buffers) */
2150 static int process_input_packet(InputStream *ist, const AVPacket *pkt)
2151 {
2152  int ret = 0, i;
2153  int got_output = 0;
2154 
2155  AVPacket avpkt;
2156  if (!ist->saw_first_ts) {
2157  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
2158  ist->pts = 0;
2159  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
2160  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
2161  ist->pts = ist->dts; //unused but better to set it to a value that's not totally wrong
2162  }
2163  ist->saw_first_ts = 1;
2164  }
2165 
2166  if (ist->next_dts == AV_NOPTS_VALUE)
2167  ist->next_dts = ist->dts;
2168  if (ist->next_pts == AV_NOPTS_VALUE)
2169  ist->next_pts = ist->pts;
2170 
2171  if (!pkt) {
2172  /* EOF handling */
2173  av_init_packet(&avpkt);
2174  avpkt.data = NULL;
2175  avpkt.size = 0;
2176  goto handle_eof;
2177  } else {
2178  avpkt = *pkt;
2179  }
2180 
2181  if (pkt->dts != AV_NOPTS_VALUE) {
2182  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
2183  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
2184  ist->next_pts = ist->pts = ist->dts;
2185  }
2186 
2187  // while we have more to decode or while the decoder did output something on EOF
2188  while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
2189  int duration;
2190  handle_eof:
2191 
2192  ist->pts = ist->next_pts;
2193  ist->dts = ist->next_dts;
2194 
2195  if (avpkt.size && avpkt.size != pkt->size &&
2196  !(ist->dec->capabilities & CODEC_CAP_SUBFRAMES)) {
2197  av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
2198  "Multiple frames in a packet from stream %d\n", pkt->stream_index);
2199  ist->showed_multi_packet_warning = 1;
2200  }
2201 
2202  switch (ist->dec_ctx->codec_type) {
2203  case AVMEDIA_TYPE_AUDIO:
2204  ret = decode_audio (ist, &avpkt, &got_output);
2205  break;
2206  case AVMEDIA_TYPE_VIDEO:
2207  ret = decode_video (ist, &avpkt, &got_output);
2208  if (avpkt.duration) {
2209  duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
2210  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
2211  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2212  duration = ((int64_t)AV_TIME_BASE *
2213  ist->dec_ctx->framerate.den * ticks) /
2214  ist->dec_ctx->framerate.num;
2215  } else
2216  duration = 0;
2217 
2218  if(ist->dts != AV_NOPTS_VALUE && duration) {
2219  ist->next_dts += duration;
2220  }else
2221  ist->next_dts = AV_NOPTS_VALUE;
2222 
2223  if (got_output)
2224  ist->next_pts += duration; //FIXME the duration is not correct in some cases
2225  break;
2226  case AVMEDIA_TYPE_SUBTITLE:
2227  ret = transcode_subtitles(ist, &avpkt, &got_output);
2228  break;
2229  default:
2230  return -1;
2231  }
2232 
2233  if (ret < 0)
2234  return ret;
2235 
2236  avpkt.dts=
2237  avpkt.pts= AV_NOPTS_VALUE;
2238 
2239  // touch data and size only if not EOF
2240  if (pkt) {
2241  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO)
2242  ret = avpkt.size;
2243  avpkt.data += ret;
2244  avpkt.size -= ret;
2245  }
2246  if (!got_output) {
2247  continue;
2248  }
2249  if (got_output && !pkt)
2250  break;
2251  }
2252 
2253  /* handle stream copy */
2254  if (!ist->decoding_needed) {
2255  ist->dts = ist->next_dts;
2256  switch (ist->dec_ctx->codec_type) {
2257  case AVMEDIA_TYPE_AUDIO:
2258  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
2259  ist->dec_ctx->sample_rate;
2260  break;
2261  case AVMEDIA_TYPE_VIDEO:
2262  if (ist->framerate.num) {
2263  // TODO: Remove work-around for c99-to-c89 issue 7
2264  AVRational time_base_q = AV_TIME_BASE_Q;
2265  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
2266  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
2267  } else if (pkt->duration) {
2268  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
2269  } else if(ist->dec_ctx->framerate.num != 0) {
2270  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
2271  ist->next_dts += ((int64_t)AV_TIME_BASE *
2272  ist->dec_ctx->framerate.den * ticks) /
2273  ist->dec_ctx->framerate.num;
2274  }
2275  break;
2276  }
2277  ist->pts = ist->dts;
2278  ist->next_pts = ist->next_dts;
2279  }
2280  for (i = 0; pkt && i < nb_output_streams; i++) {
2281  OutputStream *ost = output_streams[i];
2282 
2283  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
2284  continue;
2285 
2286  do_streamcopy(ist, ost, pkt);
2287  }
2288 
2289  return got_output;
2290 }
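/* A minimal usage sketch, not part of the original source: callers rely on
 * the pkt == NULL convention documented above to flush the decoders at EOF.
 * Assuming "ist" is an initialized InputStream that needed decoding:
 *
 *     while (process_input_packet(ist, NULL) > 0)
 *         ;   // keep draining until the decoder stops producing output
 *
 * which mirrors how process_input() below handles end of file. */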
2291 
2292 static void print_sdp(void)
2293 {
2294  char sdp[16384];
2295  int i;
2296  int j;
2297  AVIOContext *sdp_pb;
2298  AVFormatContext **avc = av_malloc_array(nb_output_files, sizeof(*avc));
2299 
2300  if (!avc)
2301  exit_program(1);
2302  for (i = 0, j = 0; i < nb_output_files; i++) {
2303  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2304  avc[j] = output_files[i]->ctx;
2305  j++;
2306  }
2307  }
2308 
2309  av_sdp_create(avc, j, sdp, sizeof(sdp));
2310 
2311  if (!sdp_filename) {
2312  printf("SDP:\n%s\n", sdp);
2313  fflush(stdout);
2314  } else {
2315  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2316  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2317  } else {
2318  avio_printf(sdp_pb, "SDP:\n%s", sdp);
2319  avio_closep(&sdp_pb);
2321  }
2322  }
2323 
2324  av_freep(&avc);
2325 }
2326 
2327 static const HWAccel *get_hwaccel(enum AVPixelFormat pix_fmt)
2328 {
2329  int i;
2330  for (i = 0; hwaccels[i].name; i++)
2331  if (hwaccels[i].pix_fmt == pix_fmt)
2332  return &hwaccels[i];
2333  return NULL;
2334 }
2335 
2336 static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
2337 {
2338  InputStream *ist = s->opaque;
2339  const enum AVPixelFormat *p;
2340  int ret;
2341 
2342  for (p = pix_fmts; *p != -1; p++) {
2343  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
2344  const HWAccel *hwaccel;
2345 
2346  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
2347  break;
2348 
2349  hwaccel = get_hwaccel(*p);
2350  if (!hwaccel ||
2351  (ist->active_hwaccel_id && ist->active_hwaccel_id != hwaccel->id) ||
2352  (ist->hwaccel_id != HWACCEL_AUTO && ist->hwaccel_id != hwaccel->id))
2353  continue;
2354 
2355  ret = hwaccel->init(s);
2356  if (ret < 0) {
2357  if (ist->hwaccel_id == hwaccel->id) {
2358  av_log(NULL, AV_LOG_FATAL,
2359  "%s hwaccel requested for input stream #%d:%d, "
2360  "but cannot be initialized.\n", hwaccel->name,
2361  ist->file_index, ist->st->index);
2362  exit_program(1);
2363  }
2364  continue;
2365  }
2366  ist->active_hwaccel_id = hwaccel->id;
2367  ist->hwaccel_pix_fmt = *p;
2368  break;
2369  }
2370 
2371  return *p;
2372 }
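/* A brief note, not part of the original source: get_format() is the
 * libavcodec pixel-format negotiation callback installed in
 * init_input_stream() below,
 *
 *     ist->dec_ctx->opaque     = ist;
 *     ist->dec_ctx->get_format = get_format;
 *
 * libavcodec passes the candidate formats ordered from most to least
 * preferred; the loop accepts the first hwaccel format whose HWAccel entry
 * initializes successfully and otherwise falls through to the first
 * software format. */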
2373 
2374 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
2375 {
2376  InputStream *ist = s->opaque;
2377 
2378  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
2379  return ist->hwaccel_get_buffer(s, frame, flags);
2380 
2381  return avcodec_default_get_buffer2(s, frame, flags);
2382 }
2383 
2384 static int init_input_stream(int ist_index, char *error, int error_len)
2385 {
2386  int ret;
2387  InputStream *ist = input_streams[ist_index];
2388 
2389  if (ist->decoding_needed) {
2390  AVCodec *codec = ist->dec;
2391  if (!codec) {
2392  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
2393  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
2394  return AVERROR(EINVAL);
2395  }
2396 
2397  ist->dec_ctx->opaque = ist;
2398  ist->dec_ctx->get_format = get_format;
2399  ist->dec_ctx->get_buffer2 = get_buffer;
2400  ist->dec_ctx->thread_safe_callbacks = 1;
2401 
2402  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
2403  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
2404  (ist->decoding_needed & DECODING_FOR_OST)) {
2405  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
2406  if (ist->decoding_needed & DECODING_FOR_FILTER)
2407  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
2408  }
2409 
2410  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
2411  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
2412  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
2413  if (ret == AVERROR_EXPERIMENTAL)
2414  abort_codec_experimental(codec, 0);
2415 
2416  snprintf(error, error_len,
2417  "Error while opening decoder for input stream "
2418  "#%d:%d : %s",
2419  ist->file_index, ist->st->index, av_err2str(ret));
2420  return ret;
2421  }
2422  assert_avoptions(ist->decoder_opts);
2423  }
2424 
2425  ist->next_pts = AV_NOPTS_VALUE;
2426  ist->next_dts = AV_NOPTS_VALUE;
2427 
2428  return 0;
2429 }
2430 
2431 static InputStream *get_input_stream(OutputStream *ost)
2432 {
2433  if (ost->source_index >= 0)
2434  return input_streams[ost->source_index];
2435  return NULL;
2436 }
2437 
2438 static int compare_int64(const void *a, const void *b)
2439 {
2440  int64_t va = *(int64_t *)a, vb = *(int64_t *)b;
2441  return va < vb ? -1 : va > vb ? +1 : 0;
2442 }
2443 
2444 static void parse_forced_key_frames(char *kf, OutputStream *ost,
2445  AVCodecContext *avctx)
2446 {
2447  char *p;
2448  int n = 1, i, size, index = 0;
2449  int64_t t, *pts;
2450 
2451  for (p = kf; *p; p++)
2452  if (*p == ',')
2453  n++;
2454  size = n;
2455  pts = av_malloc_array(size, sizeof(*pts));
2456  if (!pts) {
2457  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
2458  exit_program(1);
2459  }
2460 
2461  p = kf;
2462  for (i = 0; i < n; i++) {
2463  char *next = strchr(p, ',');
2464 
2465  if (next)
2466  *next++ = 0;
2467 
2468  if (!memcmp(p, "chapters", 8)) {
2469 
2470  AVFormatContext *avf = output_files[ost->file_index]->ctx;
2471  int j;
2472 
2473  if (avf->nb_chapters > INT_MAX - size ||
2474  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
2475  sizeof(*pts)))) {
2476  av_log(NULL, AV_LOG_FATAL,
2477  "Could not allocate forced key frames array.\n");
2478  exit_program(1);
2479  }
2480  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
2481  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2482 
2483  for (j = 0; j < avf->nb_chapters; j++) {
2484  AVChapter *c = avf->chapters[j];
2485  av_assert1(index < size);
2486  pts[index++] = av_rescale_q(c->start, c->time_base,
2487  avctx->time_base) + t;
2488  }
2489 
2490  } else {
2491 
2492  t = parse_time_or_die("force_key_frames", p, 1);
2493  av_assert1(index < size);
2494  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
2495 
2496  }
2497 
2498  p = next;
2499  }
2500 
2501  av_assert0(index == size);
2502  qsort(pts, size, sizeof(*pts), compare_int64);
2503  ost->forced_kf_count = size;
2504  ost->forced_kf_pts = pts;
2505 }
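/* A hedged command-line sketch, not part of the original source:
 * parse_forced_key_frames() handles the non-expression form of
 * -force_key_frames, i.e. a comma-separated list of times where
 * "chapters[delta]" expands to the start of every chapter in the output
 * file shifted by delta, for example
 *
 *     ffmpeg -i in.mkv -force_key_frames 0:05:00,chapters-0.1 out.mkv
 *
 * Each entry is parsed with parse_time_or_die(), rescaled to the encoder
 * time base, and the resulting array is sorted with compare_int64(). */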
2506 
2507 static void report_new_stream(int input_index, AVPacket *pkt)
2508 {
2509  InputFile *file = input_files[input_index];
2510  AVStream *st = file->ctx->streams[pkt->stream_index];
2511 
2512  if (pkt->stream_index < file->nb_streams_warn)
2513  return;
2514  av_log(file->ctx, AV_LOG_WARNING,
2515  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
2516  av_get_media_type_string(st->codec->codec_type),
2517  input_index, pkt->stream_index,
2518  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
2519  file->nb_streams_warn = pkt->stream_index + 1;
2520 }
2521 
2522 static void set_encoder_id(OutputFile *of, OutputStream *ost)
2523 {
2524  AVDictionaryEntry *e;
2525 
2526  uint8_t *encoder_string;
2527  int encoder_string_len;
2528  int format_flags = 0;
2529  int codec_flags = 0;
2530 
2531  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
2532  return;
2533 
2534  e = av_dict_get(of->opts, "fflags", NULL, 0);
2535  if (e) {
2536  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
2537  if (!o)
2538  return;
2539  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
2540  }
2541  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
2542  if (e) {
2543  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
2544  if (!o)
2545  return;
2546  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
2547  }
2548 
2549  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
2550  encoder_string = av_mallocz(encoder_string_len);
2551  if (!encoder_string)
2552  exit_program(1);
2553 
2554  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & CODEC_FLAG_BITEXACT))
2555  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
2556  else
2557  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
2558  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
2559  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
2560  AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
2561 }
2562 
2563 static int transcode_init(void)
2564 {
2565  int ret = 0, i, j, k;
2566  AVFormatContext *oc;
2567  OutputStream *ost;
2568  InputStream *ist;
2569  char error[1024] = {0};
2570  int want_sdp = 1;
2571 
2572  for (i = 0; i < nb_filtergraphs; i++) {
2573  FilterGraph *fg = filtergraphs[i];
2574  for (j = 0; j < fg->nb_outputs; j++) {
2575  OutputFilter *ofilter = fg->outputs[j];
2576  if (!ofilter->ost || ofilter->ost->source_index >= 0)
2577  continue;
2578  if (fg->nb_inputs != 1)
2579  continue;
2580  for (k = nb_input_streams-1; k >= 0 ; k--)
2581  if (fg->inputs[0]->ist == input_streams[k])
2582  break;
2583  ofilter->ost->source_index = k;
2584  }
2585  }
2586 
2587  /* init framerate emulation */
2588  for (i = 0; i < nb_input_files; i++) {
2589  InputFile *ifile = input_files[i];
2590  if (ifile->rate_emu)
2591  for (j = 0; j < ifile->nb_streams; j++)
2592  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
2593  }
2594 
2595  /* output stream init */
2596  for (i = 0; i < nb_output_files; i++) {
2597  oc = output_files[i]->ctx;
2598  if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
2599  av_dump_format(oc, i, oc->filename, 1);
2600  av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
2601  return AVERROR(EINVAL);
2602  }
2603  }
2604 
2605  /* init complex filtergraphs */
2606  for (i = 0; i < nb_filtergraphs; i++)
2607  if ((ret = avfilter_graph_config(filtergraphs[i]->graph, NULL)) < 0)
2608  return ret;
2609 
2610  /* for each output stream, we compute the right encoding parameters */
2611  for (i = 0; i < nb_output_streams; i++) {
2612  AVCodecContext *enc_ctx;
2613  AVCodecContext *dec_ctx = NULL;
2614  ost = output_streams[i];
2615  oc = output_files[ost->file_index]->ctx;
2616  ist = get_input_stream(ost);
2617 
2618  if (ost->attachment_filename)
2619  continue;
2620 
2621  enc_ctx = ost->enc_ctx;
2622 
2623  if (ist) {
2624  dec_ctx = ist->dec_ctx;
2625 
2626  ost->st->disposition = ist->st->disposition;
2627  enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
2628  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
2629  } else {
2630  for (j=0; j<oc->nb_streams; j++) {
2631  AVStream *st = oc->streams[j];
2632  if (st != ost->st && st->codec->codec_type == enc_ctx->codec_type)
2633  break;
2634  }
2635  if (j == oc->nb_streams)
2636  if (enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO || enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2637  ost->st->disposition = AV_DISPOSITION_DEFAULT;
2638  }
2639 
2640  if (ost->stream_copy) {
2641  AVRational sar;
2642  uint64_t extra_size;
2643 
2644  av_assert0(ist && !ost->filter);
2645 
2646  extra_size = (uint64_t)dec_ctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
2647 
2648  if (extra_size > INT_MAX) {
2649  return AVERROR(EINVAL);
2650  }
2651 
2652  /* if stream_copy is selected, no need to decode or encode */
2653  enc_ctx->codec_id = dec_ctx->codec_id;
2654  enc_ctx->codec_type = dec_ctx->codec_type;
2655 
2656  if (!enc_ctx->codec_tag) {
2657  unsigned int codec_tag;
2658  if (!oc->oformat->codec_tag ||
2659  av_codec_get_id (oc->oformat->codec_tag, dec_ctx->codec_tag) == enc_ctx->codec_id ||
2660  !av_codec_get_tag2(oc->oformat->codec_tag, dec_ctx->codec_id, &codec_tag))
2661  enc_ctx->codec_tag = dec_ctx->codec_tag;
2662  }
2663 
2664  enc_ctx->bit_rate = dec_ctx->bit_rate;
2665  enc_ctx->rc_max_rate = dec_ctx->rc_max_rate;
2666  enc_ctx->rc_buffer_size = dec_ctx->rc_buffer_size;
2667  enc_ctx->field_order = dec_ctx->field_order;
2668  if (dec_ctx->extradata_size) {
2669  enc_ctx->extradata = av_mallocz(extra_size);
2670  if (!enc_ctx->extradata) {
2671  return AVERROR(ENOMEM);
2672  }
2673  memcpy(enc_ctx->extradata, dec_ctx->extradata, dec_ctx->extradata_size);
2674  }
2675  enc_ctx->extradata_size= dec_ctx->extradata_size;
2676  enc_ctx->bits_per_coded_sample = dec_ctx->bits_per_coded_sample;
2677 
2678  enc_ctx->time_base = ist->st->time_base;
2679  /*
2680  * Avi is a special case here because it supports variable fps but
2681  * having the fps and the timebase differ significantly adds quite some
2682  * overhead
2683  */
2684  if(!strcmp(oc->oformat->name, "avi")) {
2685  if ( copy_tb<0 && av_q2d(ist->st->r_frame_rate) >= av_q2d(ist->st->avg_frame_rate)
2686  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(ist->st->time_base)
2687  && 0.5/av_q2d(ist->st->r_frame_rate) > av_q2d(dec_ctx->time_base)
2688  && av_q2d(ist->st->time_base) < 1.0/500 && av_q2d(dec_ctx->time_base) < 1.0/500
2689  || copy_tb==2){
2690  enc_ctx->time_base.num = ist->st->r_frame_rate.den;
2691  enc_ctx->time_base.den = 2*ist->st->r_frame_rate.num;
2692  enc_ctx->ticks_per_frame = 2;
2693  } else if ( copy_tb<0 && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > 2*av_q2d(ist->st->time_base)
2694  && av_q2d(ist->st->time_base) < 1.0/500
2695  || copy_tb==0){
2696  enc_ctx->time_base = dec_ctx->time_base;
2697  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2698  enc_ctx->time_base.den *= 2;
2699  enc_ctx->ticks_per_frame = 2;
2700  }
2701  } else if(!(oc->oformat->flags & AVFMT_VARIABLE_FPS)
2702  && strcmp(oc->oformat->name, "mov") && strcmp(oc->oformat->name, "mp4") && strcmp(oc->oformat->name, "3gp")
2703  && strcmp(oc->oformat->name, "3g2") && strcmp(oc->oformat->name, "psp") && strcmp(oc->oformat->name, "ipod")
2704  && strcmp(oc->oformat->name, "f4v")
2705  ) {
2706  if( copy_tb<0 && dec_ctx->time_base.den
2707  && av_q2d(dec_ctx->time_base)*dec_ctx->ticks_per_frame > av_q2d(ist->st->time_base)
2708  && av_q2d(ist->st->time_base) < 1.0/500
2709  || copy_tb==0){
2710  enc_ctx->time_base = dec_ctx->time_base;
2711  enc_ctx->time_base.num *= dec_ctx->ticks_per_frame;
2712  }
2713  }
2714  if ( enc_ctx->codec_tag == AV_RL32("tmcd")
2715  && dec_ctx->time_base.num < dec_ctx->time_base.den
2716  && dec_ctx->time_base.num > 0
2717  && 121LL*dec_ctx->time_base.num > dec_ctx->time_base.den) {
2718  enc_ctx->time_base = dec_ctx->time_base;
2719  }
2720 
2721  if (ist && !ost->frame_rate.num)
2722  ost->frame_rate = ist->framerate;
2723  if(ost->frame_rate.num)
2724  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2725 
2726  av_reduce(&enc_ctx->time_base.num, &enc_ctx->time_base.den,
2727  enc_ctx->time_base.num, enc_ctx->time_base.den, INT_MAX);
2728 
2729  if (ist->st->nb_side_data) {
2730  ost->st->side_data = av_realloc_array(NULL, ist->st->nb_side_data,
2731  sizeof(*ist->st->side_data));
2732  if (!ost->st->side_data)
2733  return AVERROR(ENOMEM);
2734 
2735  for (j = 0; j < ist->st->nb_side_data; j++) {
2736  const AVPacketSideData *sd_src = &ist->st->side_data[j];
2737  AVPacketSideData *sd_dst = &ost->st->side_data[j];
2738 
2739  sd_dst->data = av_malloc(sd_src->size);
2740  if (!sd_dst->data)
2741  return AVERROR(ENOMEM);
2742  memcpy(sd_dst->data, sd_src->data, sd_src->size);
2743  sd_dst->size = sd_src->size;
2744  sd_dst->type = sd_src->type;
2745  ost->st->nb_side_data++;
2746  }
2747  }
2748 
2749  ost->parser = av_parser_init(enc_ctx->codec_id);
2750 
2751  switch (enc_ctx->codec_type) {
2752  case AVMEDIA_TYPE_AUDIO:
2753  if (audio_volume != 256) {
2754  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
2755  exit_program(1);
2756  }
2757  enc_ctx->channel_layout = dec_ctx->channel_layout;
2758  enc_ctx->sample_rate = dec_ctx->sample_rate;
2759  enc_ctx->channels = dec_ctx->channels;
2760  enc_ctx->frame_size = dec_ctx->frame_size;
2761  enc_ctx->audio_service_type = dec_ctx->audio_service_type;
2762  enc_ctx->block_align = dec_ctx->block_align;
2763  enc_ctx->initial_padding = dec_ctx->delay;
2764 #if FF_API_AUDIOENC_DELAY
2765  enc_ctx->delay = dec_ctx->delay;
2766 #endif
2767  if((enc_ctx->block_align == 1 || enc_ctx->block_align == 1152 || enc_ctx->block_align == 576) && enc_ctx->codec_id == AV_CODEC_ID_MP3)
2768  enc_ctx->block_align= 0;
2769  if(enc_ctx->codec_id == AV_CODEC_ID_AC3)
2770  enc_ctx->block_align= 0;
2771  break;
2772  case AVMEDIA_TYPE_VIDEO:
2773  enc_ctx->pix_fmt = dec_ctx->pix_fmt;
2774  enc_ctx->width = dec_ctx->width;
2775  enc_ctx->height = dec_ctx->height;
2776  enc_ctx->has_b_frames = dec_ctx->has_b_frames;
2777  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
2778  sar =
2779  av_mul_q(ost->frame_aspect_ratio,
2780  (AVRational){ enc_ctx->height, enc_ctx->width });
2781  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
2782  "with stream copy may produce invalid files\n");
2783  }
2784  else if (ist->st->sample_aspect_ratio.num)
2785  sar = ist->st->sample_aspect_ratio;
2786  else
2787  sar = dec_ctx->sample_aspect_ratio;
2788  ost->st->sample_aspect_ratio = enc_ctx->sample_aspect_ratio = sar;
2789  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
2790  ost->st->r_frame_rate = ist->st->r_frame_rate;
2791  break;
2792  case AVMEDIA_TYPE_SUBTITLE:
2793  enc_ctx->width = dec_ctx->width;
2794  enc_ctx->height = dec_ctx->height;
2795  break;
2796  case AVMEDIA_TYPE_DATA:
2798  break;
2799  default:
2800  abort();
2801  }
2802  } else {
2803  if (!ost->enc)
2804  ost->enc = avcodec_find_encoder(enc_ctx->codec_id);
2805  if (!ost->enc) {
2806  /* should only happen when a default codec is not present. */
2807  snprintf(error, sizeof(error), "Encoder (codec %s) not found for output stream #%d:%d",
2808  avcodec_get_name(ost->st->codec->codec_id), ost->file_index, ost->index);
2809  ret = AVERROR(EINVAL);
2810  goto dump_format;
2811  }
2812 
2813  if (ist)
2814  ist->decoding_needed |= DECODING_FOR_OST;
2815  ost->encoding_needed = 1;
2816 
2817  set_encoder_id(output_files[ost->file_index], ost);
2818 
2819  if (!ost->filter &&
2820  (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
2821  enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO)) {
2822  FilterGraph *fg;
2823  fg = init_simple_filtergraph(ist, ost);
2824  if (configure_filtergraph(fg)) {
2825  av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
2826  exit_program(1);
2827  }
2828  }
2829 
2830  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
2831  if (ost->filter && !ost->frame_rate.num)
2832  ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
2833  if (ist && !ost->frame_rate.num)
2834  ost->frame_rate = ist->framerate;
2835  if (ist && !ost->frame_rate.num)
2836  ost->frame_rate = ist->st->r_frame_rate;
2837  if (ist && !ost->frame_rate.num) {
2838  ost->frame_rate = (AVRational){25, 1};
2839  av_log(NULL, AV_LOG_WARNING,
2840  "No information "
2841  "about the input framerate is available. Falling "
2842  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
2843  "if you want a different framerate.\n",
2844  ost->file_index, ost->index);
2845  }
2846 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
2847  if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
2848  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
2849  ost->frame_rate = ost->enc->supported_framerates[idx];
2850  }
2851  // reduce frame rate for mpeg4 to be within the spec limits
2852  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
2853  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
2854  ost->frame_rate.num, ost->frame_rate.den, 65535);
2855  }
2856  }
2857 
2858  switch (enc_ctx->codec_type) {
2859  case AVMEDIA_TYPE_AUDIO:
2860  enc_ctx->sample_fmt = ost->filter->filter->inputs[0]->format;
2861  enc_ctx->sample_rate = ost->filter->filter->inputs[0]->sample_rate;
2862  enc_ctx->channel_layout = ost->filter->filter->inputs[0]->channel_layout;
2863  enc_ctx->channels = avfilter_link_get_channels(ost->filter->filter->inputs[0]);
2864  enc_ctx->time_base = (AVRational){ 1, enc_ctx->sample_rate };
2865  break;
2866  case AVMEDIA_TYPE_VIDEO:
2867  enc_ctx->time_base = av_inv_q(ost->frame_rate);
2868  if (ost->filter && !(enc_ctx->time_base.num && enc_ctx->time_base.den))
2869  enc_ctx->time_base = ost->filter->filter->inputs[0]->time_base;
2870  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
2871  && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
2872  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
2873  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
2874  }
2875  for (j = 0; j < ost->forced_kf_count; j++)
2876  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
2877  AV_TIME_BASE_Q,
2878  enc_ctx->time_base);
2879 
2880  enc_ctx->width = ost->filter->filter->inputs[0]->w;
2881  enc_ctx->height = ost->filter->filter->inputs[0]->h;
2882  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
2883  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
2884  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
2885  ost->filter->filter->inputs[0]->sample_aspect_ratio;
2886  if (!strncmp(ost->enc->name, "libx264", 7) &&
2887  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2888  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
2889  av_log(NULL, AV_LOG_WARNING,
2890  "No pixel format specified, %s for H.264 encoding chosen.\n"
2891  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2892  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
2893  if (!strncmp(ost->enc->name, "mpeg2video", 10) &&
2894  enc_ctx->pix_fmt == AV_PIX_FMT_NONE &&
2895  ost->filter->filter->inputs[0]->format != AV_PIX_FMT_YUV420P)
2896  av_log(NULL, AV_LOG_WARNING,
2897  "No pixel format specified, %s for MPEG-2 encoding chosen.\n"
2898  "Use -pix_fmt yuv420p for compatibility with outdated media players.\n",
2899  av_get_pix_fmt_name(ost->filter->filter->inputs[0]->format));
2900  enc_ctx->pix_fmt = ost->filter->filter->inputs[0]->format;
2901 
2902  ost->st->avg_frame_rate = ost->frame_rate;
2903 
2904  if (!dec_ctx ||
2905  enc_ctx->width != dec_ctx->width ||
2906  enc_ctx->height != dec_ctx->height ||
2907  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
2908  enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
2909  }
2910 
2911  if (ost->forced_keyframes) {
2912  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
2913  ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
2914  forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
2915  if (ret < 0) {
2916  av_log(NULL, AV_LOG_ERROR,
2917  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
2918  return ret;
2919  }
2920  ost->forced_keyframes_expr_const_values[FKF_N] = 0;
2921  ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
2922  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
2923  ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;
2924  } else {
2925  parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
2926  }
2927  }
2928  break;
2929  case AVMEDIA_TYPE_SUBTITLE:
2930  enc_ctx->time_base = (AVRational){1, 1000};
2931  if (!enc_ctx->width) {
2932  enc_ctx->width = input_streams[ost->source_index]->st->codec->width;
2933  enc_ctx->height = input_streams[ost->source_index]->st->codec->height;
2934  }
2935  break;
2936  case AVMEDIA_TYPE_DATA:
2937  break;
2938  default:
2939  abort();
2940  break;
2941  }
2942  /* two pass mode */
2943  if (enc_ctx->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2)) {
2944  char logfilename[1024];
2945  FILE *f;
2946 
2947  snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
2948  ost->logfile_prefix ? ost->logfile_prefix :
2949  DEFAULT_PASS_LOGFILENAME_PREFIX,
2950  i);
2951  if (!strcmp(ost->enc->name, "libx264")) {
2952  av_dict_set(&ost->encoder_opts, "stats", logfilename, AV_DICT_DONT_OVERWRITE);
2953  } else {
2954  if (enc_ctx->flags & CODEC_FLAG_PASS2) {
2955  char *logbuffer;
2956  size_t logbuffer_size;
2957  if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
2958  av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
2959  logfilename);
2960  exit_program(1);
2961  }
2962  enc_ctx->stats_in = logbuffer;
2963  }
2964  if (enc_ctx->flags & CODEC_FLAG_PASS1) {
2965  f = av_fopen_utf8(logfilename, "wb");
2966  if (!f) {
2967  av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
2968  logfilename, strerror(errno));
2969  exit_program(1);
2970  }
2971  ost->logfile = f;
2972  }
2973  }
2974  }
2975  }
2976 
2977  if (ost->disposition) {
2978  static const AVOption opts[] = {
2979  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
2980  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
2981  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
2982  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
2983  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
2984  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
2985  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
2986  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
2987  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
2988  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
2989  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
2990  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
2991  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
2992  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
2993  { NULL },
2994  };
2995  static const AVClass class = {
2996  .class_name = "",
2997  .item_name = av_default_item_name,
2998  .option = opts,
2999  .version = LIBAVUTIL_VERSION_INT,
3000  };
3001  const AVClass *pclass = &class;
3002 
3003  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3004  if (ret < 0)
3005  goto dump_format;
3006  }
3007  }
3008 
3009  /* open each encoder */
3010  for (i = 0; i < nb_output_streams; i++) {
3011  ost = output_streams[i];
3012  if (ost->encoding_needed) {
3013  AVCodec *codec = ost->enc;
3014  AVCodecContext *dec = NULL;
3015 
3016  if ((ist = get_input_stream(ost)))
3017  dec = ist->dec_ctx;
3018  if (dec && dec->subtitle_header) {
3019  /* ASS code assumes this buffer is null terminated so add extra byte. */
3020  ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
3021  if (!ost->enc_ctx->subtitle_header) {
3022  ret = AVERROR(ENOMEM);
3023  goto dump_format;
3024  }
3025  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3026  ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
3027  }
3028  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3029  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
3030  av_dict_set(&ost->encoder_opts, "side_data_only_packets", "1", 0);
3031 
3032  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3033  if (ret == AVERROR_EXPERIMENTAL)
3034  abort_codec_experimental(codec, 1);
3035  snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
3036  ost->file_index, ost->index);
3037  goto dump_format;
3038  }
3039  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3040  !(ost->enc->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE))
3041  av_buffersink_set_frame_size(ost->filter->filter,
3042  ost->enc_ctx->frame_size);
3043  assert_avoptions(ost->encoder_opts);
3044  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000)
3045  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3046  " It takes bits/s as argument, not kbits/s\n");
3047  } else {
3048  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3049  if (ret < 0) {
3050  av_log(NULL, AV_LOG_FATAL,
3051  "Error setting up codec context options.\n");
3052  return ret;
3053  }
3054  }
3055 
3056  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3057  if (ret < 0) {
3058  av_log(NULL, AV_LOG_FATAL,
3059  "Error initializing the output stream codec context.\n");
3060  exit_program(1);
3061  }
3062  ost->st->codec->codec= ost->enc_ctx->codec;
3063 
3064  // copy timebase while removing common factors
3065  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3066  }
3067 
3068  /* init input streams */
3069  for (i = 0; i < nb_input_streams; i++)
3070  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3071  for (i = 0; i < nb_output_streams; i++) {
3072  ost = output_streams[i];
3073  avcodec_close(ost->enc_ctx);
3074  }
3075  goto dump_format;
3076  }
3077 
3078  /* discard unused programs */
3079  for (i = 0; i < nb_input_files; i++) {
3080  InputFile *ifile = input_files[i];
3081  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3082  AVProgram *p = ifile->ctx->programs[j];
3083  int discard = AVDISCARD_ALL;
3084 
3085  for (k = 0; k < p->nb_stream_indexes; k++)
3086  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3087  discard = AVDISCARD_DEFAULT;
3088  break;
3089  }
3090  p->discard = discard;
3091  }
3092  }
3093 
3094  /* open files and write file headers */
3095  for (i = 0; i < nb_output_files; i++) {
3096  oc = output_files[i]->ctx;
3097  oc->interrupt_callback = int_cb;
3098  if ((ret = avformat_write_header(oc, &output_files[i]->opts)) < 0) {
3099  snprintf(error, sizeof(error),
3100  "Could not write header for output file #%d "
3101  "(incorrect codec parameters ?): %s",
3102  i, av_err2str(ret));
3103  ret = AVERROR(EINVAL);
3104  goto dump_format;
3105  }
3106 // assert_avoptions(output_files[i]->opts);
3107  if (strcmp(oc->oformat->name, "rtp")) {
3108  want_sdp = 0;
3109  }
3110  }
3111 
3112  dump_format:
3113  /* dump the file output parameters - cannot be done before in case
3114  of stream copy */
3115  for (i = 0; i < nb_output_files; i++) {
3116  av_dump_format(output_files[i]->ctx, i, output_files[i]->ctx->filename, 1);
3117  }
3118 
3119  /* dump the stream mapping */
3120  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3121  for (i = 0; i < nb_input_streams; i++) {
3122  ist = input_streams[i];
3123 
3124  for (j = 0; j < ist->nb_filters; j++) {
3125  if (ist->filters[j]->graph->graph_desc) {
3126  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3127  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3128  ist->filters[j]->name);
3129  if (nb_filtergraphs > 1)
3130  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3131  av_log(NULL, AV_LOG_INFO, "\n");
3132  }
3133  }
3134  }
3135 
3136  for (i = 0; i < nb_output_streams; i++) {
3137  ost = output_streams[i];
3138 
3139  if (ost->attachment_filename) {
3140  /* an attached file */
3141  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3142  ost->attachment_filename, ost->file_index, ost->index);
3143  continue;
3144  }
3145 
3146  if (ost->filter && ost->filter->graph->graph_desc) {
3147  /* output from a complex graph */
3148  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3149  if (nb_filtergraphs > 1)
3150  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3151 
3152  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3153  ost->index, ost->enc ? ost->enc->name : "?");
3154  continue;
3155  }
3156 
3157  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3158  input_streams[ost->source_index]->file_index,
3159  input_streams[ost->source_index]->st->index,
3160  ost->file_index,
3161  ost->index);
3162  if (ost->sync_ist != input_streams[ost->source_index])
3163  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3164  ost->sync_ist->file_index,
3165  ost->sync_ist->st->index);
3166  if (ost->stream_copy)
3167  av_log(NULL, AV_LOG_INFO, " (copy)");
3168  else {
3169  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3170  const AVCodec *out_codec = ost->enc;
3171  const char *decoder_name = "?";
3172  const char *in_codec_name = "?";
3173  const char *encoder_name = "?";
3174  const char *out_codec_name = "?";
3175 
3176  if (in_codec) {
3177  decoder_name = in_codec->name;
3178  in_codec_name = avcodec_descriptor_get(in_codec->id)->name;
3179  if (!strcmp(decoder_name, in_codec_name))
3180  decoder_name = "native";
3181  }
3182 
3183  if (out_codec) {
3184  encoder_name = out_codec->name;
3185  out_codec_name = avcodec_descriptor_get(out_codec->id)->name;
3186  if (!strcmp(encoder_name, out_codec_name))
3187  encoder_name = "native";
3188  }
3189 
3190  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3191  in_codec_name, decoder_name,
3192  out_codec_name, encoder_name);
3193  }
3194  av_log(NULL, AV_LOG_INFO, "\n");
3195  }
3196 
3197  if (ret) {
3198  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3199  return ret;
3200  }
3201 
3202  if (sdp_filename || want_sdp) {
3203  print_sdp();
3204  }
3205 
3206  transcode_init_done = 1;
3207 
3208  return 0;
3209 }
3210 
3211 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3212 static int need_output(void)
3213 {
3214  int i;
3215 
3216  for (i = 0; i < nb_output_streams; i++) {
3217  OutputStream *ost = output_streams[i];
3218  OutputFile *of = output_files[ost->file_index];
3219  AVFormatContext *os = output_files[ost->file_index]->ctx;
3220 
3221  if (ost->finished ||
3222  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3223  continue;
3224  if (ost->frame_number >= ost->max_frames) {
3225  int j;
3226  for (j = 0; j < of->ctx->nb_streams; j++)
3227  close_output_stream(output_streams[of->ost_index + j]);
3228  continue;
3229  }
3230 
3231  return 1;
3232  }
3233 
3234  return 0;
3235 }
3236 
3237 /**
3238  * Select the output stream to process.
3239  *
3240  * @return selected output stream, or NULL if none available
3241  */
3242 static OutputStream *choose_output(void)
3243 {
3244  int i;
3245  int64_t opts_min = INT64_MAX;
3246  OutputStream *ost_min = NULL;
3247 
3248  for (i = 0; i < nb_output_streams; i++) {
3249  OutputStream *ost = output_streams[i];
3250  int64_t opts = av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3251  AV_TIME_BASE_Q);
3252  if (!ost->finished && opts < opts_min) {
3253  opts_min = opts;
3254  ost_min = ost->unavailable ? NULL : ost;
3255  }
3256  }
3257  return ost_min;
3258 }
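/* A brief note, not part of the original source: choose_output() keeps the
 * output streams roughly in sync by always returning the stream whose last
 * muxed DTS, rescaled to a common microsecond time base with
 *
 *     av_rescale_q(ost->st->cur_dts, ost->st->time_base, AV_TIME_BASE_Q)
 *
 * is furthest behind; a stream currently marked unavailable yields NULL so
 * the transcode loop (beyond this excerpt) reads more input first. */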
3259 
3260 static int check_keyboard_interaction(int64_t cur_time)
3261 {
3262  int i, ret, key;
3263  static int64_t last_time;
3264  if (received_nb_signals)
3265  return AVERROR_EXIT;
3266  /* read_key() returns 0 on EOF */
3267  if(cur_time - last_time >= 100000 && !run_as_daemon){
3268  key = read_key();
3269  last_time = cur_time;
3270  }else
3271  key = -1;
3272  if (key == 'q')
3273  return AVERROR_EXIT;
3274  if (key == '+') av_log_set_level(av_log_get_level()+10);
3275  if (key == '-') av_log_set_level(av_log_get_level()-10);
3276  if (key == 's') qp_hist ^= 1;
3277  if (key == 'h'){
3278  if (do_hex_dump){
3279  do_hex_dump = do_pkt_dump = 0;
3280  } else if(do_pkt_dump){
3281  do_hex_dump = 1;
3282  } else
3283  do_pkt_dump = 1;
3284  av_log_set_level(AV_LOG_DEBUG);
3285  }
3286  if (key == 'c' || key == 'C'){
3287  char buf[4096], target[64], command[256], arg[256] = {0};
3288  double time;
3289  int k, n = 0;
3290  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3291  i = 0;
3292  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3293  if (k > 0)
3294  buf[i++] = k;
3295  buf[i] = 0;
3296  if (k > 0 &&
3297  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3298  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3299  target, time, command, arg);
3300  for (i = 0; i < nb_filtergraphs; i++) {
3301  FilterGraph *fg = filtergraphs[i];
3302  if (fg->graph) {
3303  if (time < 0) {
3304  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3305  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3306  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3307  } else if (key == 'c') {
3308  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3309  ret = AVERROR_PATCHWELCOME;
3310  } else {
3311  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3312  }
3313  }
3314  }
3315  } else {
3316  av_log(NULL, AV_LOG_ERROR,
3317  "Parse error, at least 3 arguments were expected, "
3318  "only %d given in string '%s'\n", n, buf);
3319  }
3320  }
3321  if (key == 'd' || key == 'D'){
3322  int debug=0;
3323  if(key == 'D') {
3324  debug = input_streams[0]->st->codec->debug<<1;
3325  if(!debug) debug = 1;
3326  while(debug & (FF_DEBUG_DCT_COEFF|FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) //unsupported, would just crash
3327  debug += debug;
3328  }else
3329  if(scanf("%d", &debug)!=1)
3330  fprintf(stderr,"error parsing debug value\n");
3331  for(i=0;i<nb_input_streams;i++) {
3332  input_streams[i]->st->codec->debug = debug;
3333  }
3334  for(i=0;i<nb_output_streams;i++) {
3335  OutputStream *ost = output_streams[i];
3336  ost->enc_ctx->debug = debug;
3337  }
3338  if(debug) av_log_set_level(AV_LOG_DEBUG);
3339  fprintf(stderr,"debug=%d\n", debug);
3340  }
3341  if (key == '?'){
3342  fprintf(stderr, "key function\n"
3343  "? show this help\n"
3344  "+ increase verbosity\n"
3345  "- decrease verbosity\n"
3346  "c Send command to first matching filter supporting it\n"
3347  "C Send/Queue command to all matching filters\n"
3348  "D cycle through available debug modes\n"
3349  "h dump packets/hex press to cycle through the 3 states\n"
3350  "q quit\n"
3351  "s Show QP histogram\n"
3352  );
3353  }
3354  return 0;
3355 }
3356 
3357 #if HAVE_PTHREADS
3358 static void *input_thread(void *arg)
3359 {
3360  InputFile *f = arg;
3361  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
3362  int ret = 0;
3363 
3364  while (1) {
3365  AVPacket pkt;
3366  ret = av_read_frame(f->ctx, &pkt);
3367 
3368  if (ret == AVERROR(EAGAIN)) {
3369  av_usleep(10000);
3370  continue;
3371  }
3372  if (ret < 0) {
3373  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3374  break;
3375  }
3376  av_dup_packet(&pkt);
3377  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3378  if (flags && ret == AVERROR(EAGAIN)) {
3379  flags = 0;
3380  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
3382  "Thread message queue blocking; consider raising the "
3383  "thread_queue_size option (current value: %d)\n",
3384  f->thread_queue_size);
3385  }
3386  if (ret < 0) {
3387  if (ret != AVERROR_EOF)
3388  av_log(f->ctx, AV_LOG_ERROR,
3389  "Unable to send packet to main thread: %s\n",
3390  av_err2str(ret));
3391  av_free_packet(&pkt);
3392  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
3393  break;
3394  }
3395  }
3396 
3397  return NULL;
3398 }
3399 
3400 static void free_input_threads(void)
3401 {
3402  int i;
3403 
3404  for (i = 0; i < nb_input_files; i++) {
3405  InputFile *f = input_files[i];
3406  AVPacket pkt;
3407 
3408  if (!f->in_thread_queue)
3409  continue;
3410  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
3411  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
3412  av_free_packet(&pkt);
3413 
3414  pthread_join(f->thread, NULL);
3415  f->joined = 1;
3416  av_thread_message_queue_free(&f->in_thread_queue);
3417  }
3418 }
3419 
3420 static int init_input_threads(void)
3421 {
3422  int i, ret;
3423 
3424  if (nb_input_files == 1)
3425  return 0;
3426 
3427  for (i = 0; i < nb_input_files; i++) {
3428  InputFile *f = input_files[i];
3429 
3430  if (f->ctx->pb ? !f->ctx->pb->seekable :
3431  strcmp(f->ctx->iformat->name, "lavfi"))
3432  f->non_blocking = 1;
3433  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3434  f->thread_queue_size, sizeof(AVPacket));
3435  if (ret < 0)
3436  return ret;
3437 
3438  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3439  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3440  av_thread_message_queue_free(&f->in_thread_queue);
3441  return AVERROR(ret);
3442  }
3443  }
3444  return 0;
3445 }
3446 
3447 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
3448 {
3449  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
3450  f->non_blocking ?
3451  AV_THREAD_MESSAGE_NONBLOCK : 0);
3452 }
3453 #endif
3454 
3455 static int get_input_packet(InputFile *f, AVPacket *pkt)
3456 {
3457  if (f->rate_emu) {
3458  int i;
3459  for (i = 0; i < f->nb_streams; i++) {
3460  InputStream *ist = input_streams[f->ist_index + i];
3461  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
3462  int64_t now = av_gettime_relative() - ist->start;
3463  if (pts > now)
3464  return AVERROR(EAGAIN);
3465  }
3466  }
3467 
3468 #if HAVE_PTHREADS
3469  if (nb_input_files > 1)
3470  return get_input_packet_mt(f, pkt);
3471 #endif
3472  return av_read_frame(f->ctx, pkt);
3473 }
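/* A hedged sketch, not part of the original source, of the rate-emulation
 * branch above, which implements the -re option; times are in microseconds:
 *
 *     pts_us = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
 *     now_us = av_gettime_relative() - ist->start;
 *     if (pts_us > now_us)
 *         return AVERROR(EAGAIN);   // too early, try this input again later
 *
 * so reading never runs ahead of real time for that input file. */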
3474 
3475 static int got_eagain(void)
3476 {
3477  int i;
3478  for (i = 0; i < nb_output_streams; i++)
3479  if (output_streams[i]->unavailable)
3480  return 1;
3481  return 0;
3482 }
3483 
3484 static void reset_eagain(void)
3485 {
3486  int i;
3487  for (i = 0; i < nb_input_files; i++)
3488  input_files[i]->eagain = 0;
3489  for (i = 0; i < nb_output_streams; i++)
3490  output_streams[i]->unavailable = 0;
3491 }
3492 
3493 /*
3494  * Return
3495  * - 0 -- one packet was read and processed
3496  * - AVERROR(EAGAIN) -- no packets were available for selected file,
3497  * this function should be called again
3498  * - AVERROR_EOF -- this function should not be called again
3499  */
3500 static int process_input(int file_index)
3501 {
3502  InputFile *ifile = input_files[file_index];
3503  AVFormatContext *is;
3504  InputStream *ist;
3505  AVPacket pkt;
3506  int ret, i, j;
3507 
3508  is = ifile->ctx;
3509  ret = get_input_packet(ifile, &pkt);
3510 
3511  if (ret == AVERROR(EAGAIN)) {
3512  ifile->eagain = 1;
3513  return ret;
3514  }
3515  if (ret < 0) {
3516  if (ret != AVERROR_EOF) {
3517  print_error(is->filename, ret);
3518  if (exit_on_error)
3519  exit_program(1);
3520  }
3521 
3522  for (i = 0; i < ifile->nb_streams; i++) {
3523  ist = input_streams[ifile->ist_index + i];
3524  if (ist->decoding_needed) {
3525  ret = process_input_packet(ist, NULL);
3526  if (ret>0)
3527  return 0;
3528  }
3529 
3530  /* mark all outputs that don't go through lavfi as finished */
3531  for (j = 0; j < nb_output_streams; j++) {
3532  OutputStream *ost = output_streams[j];
3533 
3534  if (ost->source_index == ifile->ist_index + i &&
3535  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
3536  finish_output_stream(ost);
3537  }
3538  }
3539 
3540  ifile->eof_reached = 1;
3541  return AVERROR(EAGAIN);
3542  }
3543 
3544  reset_eagain();
3545 
3546  if (do_pkt_dump) {
3547  av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
3548  is->streams[pkt.stream_index]);
3549  }
3550  /* the following test is needed in case new streams appear
3551  dynamically in the stream: we ignore them */
3552  if (pkt.stream_index >= ifile->nb_streams) {
3553  report_new_stream(file_index, &pkt);
3554  goto discard_packet;
3555  }
3556 
3557  ist = input_streams[ifile->ist_index + pkt.stream_index];
3558 
3559  ist->data_size += pkt.size;
3560  ist->nb_packets++;
3561 
3562  if (ist->discard)
3563  goto discard_packet;
3564 
3565  if (debug_ts) {
3566  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
3567  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3568  ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
3569  av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
3570  av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
3571  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3572  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3573  av_ts2str(input_files[ist->file_index]->ts_offset),
3574  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3575  }
3576 
3577  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
3578  int64_t stime, stime2;
3579  // Correcting starttime based on the enabled streams
3580  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
3581  // so we instead do it here as part of discontinuity handling
3582  if ( ist->next_dts == AV_NOPTS_VALUE
3583  && ifile->ts_offset == -is->start_time
3584  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
3585  int64_t new_start_time = INT64_MAX;
3586  for (i=0; i<is->nb_streams; i++) {
3587  AVStream *st = is->streams[i];
3588  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
3589  continue;
3590  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
3591  }
3592  if (new_start_time > is->start_time) {
3593  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
3594  ifile->ts_offset = -new_start_time;
3595  }
3596  }
3597 
3598  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
3599  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
3600  ist->wrap_correction_done = 1;
3601 
3602  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3603  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
3604  ist->wrap_correction_done = 0;
3605  }
3606  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
3607  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
3608  ist->wrap_correction_done = 0;
3609  }
3610  }
3611 
3612  /* add the stream-global side data to the first packet */
3613  if (ist->nb_packets == 1) {
3614  if (ist->st->nb_side_data)
3615  av_packet_split_side_data(&pkt);
3616  for (i = 0; i < ist->st->nb_side_data; i++) {
3617  AVPacketSideData *src_sd = &ist->st->side_data[i];
3618  uint8_t *dst_data;
3619 
3620  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
3621  continue;
3622 
3623  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
3624  if (!dst_data)
3625  exit_program(1);
3626 
3627  memcpy(dst_data, src_sd->data, src_sd->size);
3628  }
3629  }
3630 
3631  if (pkt.dts != AV_NOPTS_VALUE)
3632  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3633  if (pkt.pts != AV_NOPTS_VALUE)
3634  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
3635 
3636  if (pkt.pts != AV_NOPTS_VALUE)
3637  pkt.pts *= ist->ts_scale;
3638  if (pkt.dts != AV_NOPTS_VALUE)
3639  pkt.dts *= ist->ts_scale;
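 /* Editorial note (illustrative example): ifile->ts_offset is kept in AV_TIME_BASE
  * (microsecond) units, so it is rescaled to the stream time base before being added
  * to the packet timestamps. For instance, an offset of -1500000 us applied to a
  * 1/90000 time base adds -135000 ticks to pkt.pts/pkt.dts; ts_scale (set with the
  * -itsscale option) then multiplies the result. */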
3640 
3641  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3642  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3643  pkt.dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
3644  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
3645  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3646  int64_t delta = pkt_dts - ifile->last_ts;
3647  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3648  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
3649  ifile->ts_offset -= delta;
3650  av_log(NULL, AV_LOG_DEBUG,
3651  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3652  delta, ifile->ts_offset);
3653  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3654  if (pkt.pts != AV_NOPTS_VALUE)
3655  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3656  }
3657  }
3658 
3659  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3660  ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
3661  pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
3662  !copy_ts) {
3663  int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3664  int64_t delta = pkt_dts - ist->next_dts;
3665  if (is->iformat->flags & AVFMT_TS_DISCONT) {
3666  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
3667  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
3668  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
3669  ifile->ts_offset -= delta;
3670  av_log(NULL, AV_LOG_DEBUG,
3671  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
3672  delta, ifile->ts_offset);
3673  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3674  if (pkt.pts != AV_NOPTS_VALUE)
3675  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
3676  }
3677  } else {
3678  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3679  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3680  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
3681  pkt.dts = AV_NOPTS_VALUE;
3682  }
3683  if (pkt.pts != AV_NOPTS_VALUE){
3684  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
3685  delta = pkt_pts - ist->next_dts;
3686  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
3687  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
3688  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
3689  pkt.pts = AV_NOPTS_VALUE;
3690  }
3691  }
3692  }
3693  }
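 /* Editorial note (illustrative summary of the block above): for containers flagged
  * AVFMT_TS_DISCONT, a dts that lands more than dts_delta_threshold seconds away from
  * the predicted next_dts shifts the whole file's ts_offset so that later packets line
  * up again (e.g. a 3600 s jump moves ts_offset by -3600*AV_TIME_BASE). For all other
  * containers the offset is never shifted; timestamps further than dts_error_threshold
  * seconds from the prediction are simply invalidated with AV_NOPTS_VALUE. */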
3694 
3695  if (pkt.dts != AV_NOPTS_VALUE)
3696  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
3697 
3698  if (debug_ts) {
3699  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
3701  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
3702  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
3703  av_ts2str(input_files[ist->file_index]->ts_offset),
3704  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
3705  }
3706 
3707  sub2video_heartbeat(ist, pkt.pts);
3708 
3709  ret = process_input_packet(ist, &pkt);
3710  if (ret < 0) {
3711  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
3712  ist->file_index, ist->st->index, av_err2str(ret));
3713  if (exit_on_error)
3714  exit_program(1);
3715  }
3716 
3717 discard_packet:
3718  av_free_packet(&pkt);
3719 
3720  return 0;
3721 }
3722 
3723 /**
3724  * Perform a step of transcoding for the specified filter graph.
3725  *
3726  * @param[in] graph filter graph to consider
3727  * @param[out] best_ist  input stream where a frame would allow processing to continue
3728  * @return 0 for success, <0 for error
3729  */
3730 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
3731 {
3732  int i, ret;
3733  int nb_requests, nb_requests_max = 0;
3734  InputFilter *ifilter;
3735  InputStream *ist;
3736 
3737  *best_ist = NULL;
3738  ret = avfilter_graph_request_oldest(graph->graph);
3739  if (ret >= 0)
3740  return reap_filters();
3741 
3742  if (ret == AVERROR_EOF) {
3743  ret = reap_filters();
3744  for (i = 0; i < graph->nb_outputs; i++)
3745  close_output_stream(graph->outputs[i]->ost);
3746  return ret;
3747  }
3748  if (ret != AVERROR(EAGAIN))
3749  return ret;
3750 
3751  for (i = 0; i < graph->nb_inputs; i++) {
3752  ifilter = graph->inputs[i];
3753  ist = ifilter->ist;
3754  if (input_files[ist->file_index]->eagain ||
3755  input_files[ist->file_index]->eof_reached)
3756  continue;
3757  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
3758  if (nb_requests > nb_requests_max) {
3759  nb_requests_max = nb_requests;
3760  *best_ist = ist;
3761  }
3762  }
3763 
3764  if (!*best_ist)
3765  for (i = 0; i < graph->nb_outputs; i++)
3766  graph->outputs[i]->ost->unavailable = 1;
3767 
3768  return 0;
3769 }
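 /* Editorial sketch: the function above is an instance of the generic libavfilter
  * "request the oldest sink, then feed the most starved source" idiom. A minimal,
  * self-contained version of that idiom is sketched below; pick_starved_source(),
  * srcs and nb_srcs are hypothetical names, and error handling is kept minimal. */

 #include "libavutil/error.h"
 #include "libavfilter/avfilter.h"
 #include "libavfilter/buffersrc.h"

 /* Return the buffer source the graph is starving for, or NULL if nothing to feed. */
 static AVFilterContext *pick_starved_source(AVFilterGraph *graph,
                                             AVFilterContext **srcs, int nb_srcs)
 {
     AVFilterContext *best = NULL;
     unsigned best_requests = 0;
     int ret = avfilter_graph_request_oldest(graph);

     if (ret >= 0)
         return NULL;                 /* the graph produced output; nothing to feed */
     if (ret != AVERROR(EAGAIN))
         return NULL;                 /* EOF or a hard error; nothing to feed either */

     for (int i = 0; i < nb_srcs; i++) {
         unsigned n = av_buffersrc_get_nb_failed_requests(srcs[i]);
         if (n > best_requests) {     /* most failed requests == most starved input */
             best_requests = n;
             best = srcs[i];
         }
     }
     return best;                     /* caller should demux/decode for this input */
 }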
3770 
3771 /**
3772  * Run a single step of transcoding.
3773  *
3774  * @return 0 for success, <0 for error
3775  */
3776 static int transcode_step(void)
3777 {
3778  OutputStream *ost;
3779  InputStream *ist;
3780  int ret;
3781 
3782  ost = choose_output();
3783  if (!ost) {
3784  if (got_eagain()) {
3785  reset_eagain();
3786  av_usleep(10000);
3787  return 0;
3788  }
3789  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
3790  return AVERROR_EOF;
3791  }
3792 
3793  if (ost->filter) {
3794  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
3795  return ret;
3796  if (!ist)
3797  return 0;
3798  } else {
3799  av_assert0(ost->source_index >= 0);
3800  ist = input_streams[ost->source_index];
3801  }
3802 
3803  ret = process_input(ist->file_index);
3804  if (ret == AVERROR(EAGAIN)) {
3805  if (input_files[ist->file_index]->eagain)
3806  ost->unavailable = 1;
3807  return 0;
3808  }
3809  if (ret < 0)
3810  return ret == AVERROR_EOF ? 0 : ret;
3811 
3812  return reap_filters();
3813 }
3814 
3815 /*
3816  * The following code is the main loop of the file converter
3817  */
3818 static int transcode(void)
3819 {
3820  int ret, i;
3821  AVFormatContext *os;
3822  OutputStream *ost;
3823  InputStream *ist;
3824  int64_t timer_start;
3825 
3826  ret = transcode_init();
3827  if (ret < 0)
3828  goto fail;
3829 
3830  if (stdin_interaction) {
3831  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
3832  }
3833 
3834  timer_start = av_gettime_relative();
3835 
3836 #if HAVE_PTHREADS
3837  if ((ret = init_input_threads()) < 0)
3838  goto fail;
3839 #endif
3840 
3841  while (!received_sigterm) {
3842  int64_t cur_time= av_gettime_relative();
3843 
3844  /* if 'q' was pressed, exit */
3845  if (stdin_interaction)
3846  if (check_keyboard_interaction(cur_time) < 0)
3847  break;
3848 
3849  /* check if there's any stream where output is still needed */
3850  if (!need_output()) {
3851  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
3852  break;
3853  }
3854 
3855  ret = transcode_step();
3856  if (ret < 0) {
3857  if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
3858  continue;
3859 
3860  av_log(NULL, AV_LOG_ERROR, "Error while filtering.\n");
3861  break;
3862  }
3863 
3864  /* dump the report using the first output video and audio streams */
3865  print_report(0, timer_start, cur_time);
3866  }
3867 #if HAVE_PTHREADS
3868  free_input_threads();
3869 #endif
3870 
3871  /* at the end of stream, we must flush the decoder buffers */
3872  for (i = 0; i < nb_input_streams; i++) {
3873  ist = input_streams[i];
3874  if (!input_files[ist->file_index]->eof_reached && ist->decoding_needed) {
3875  process_input_packet(ist, NULL);
3876  }
3877  }
3878  flush_encoders();
3879 
3880  term_exit();
3881 
3882  /* write the trailer if needed and close file */
3883  for (i = 0; i < nb_output_files; i++) {
3884  os = output_files[i]->ctx;
3885  av_write_trailer(os);
3886  }
3887 
3888  /* dump report by using the first video and audio streams */
3889  print_report(1, timer_start, av_gettime_relative());
3890 
3891  /* close each encoder */
3892  for (i = 0; i < nb_output_streams; i++) {
3893  ost = output_streams[i];
3894  if (ost->encoding_needed) {
3895  av_freep(&ost->enc_ctx->stats_in);
3896  }
3897  }
3898 
3899  /* close each decoder */
3900  for (i = 0; i < nb_input_streams; i++) {
3901  ist = input_streams[i];
3902  if (ist->decoding_needed) {
3903  avcodec_close(ist->dec_ctx);
3904  if (ist->hwaccel_uninit)
3905  ist->hwaccel_uninit(ist->dec_ctx);
3906  }
3907  }
3908 
3909  /* finished ! */
3910  ret = 0;
3911 
3912  fail:
3913 #if HAVE_PTHREADS
3914  free_input_threads();
3915 #endif
3916 
3917  if (output_streams) {
3918  for (i = 0; i < nb_output_streams; i++) {
3919  ost = output_streams[i];
3920  if (ost) {
3921  if (ost->logfile) {
3922  fclose(ost->logfile);
3923  ost->logfile = NULL;
3924  }
3925  av_freep(&ost->forced_kf_pts);
3926  av_freep(&ost->apad);
3927  av_freep(&ost->disposition);
3928  av_dict_free(&ost->encoder_opts);
3929  av_dict_free(&ost->swr_opts);
3930  av_dict_free(&ost->resample_opts);
3931  av_dict_free(&ost->bsf_args);
3932  }
3933  }
3934  }
3935  return ret;
3936 }
3937 
3938 
3939 static int64_t getutime(void)
3940 {
3941 #if HAVE_GETRUSAGE
3942  struct rusage rusage;
3943 
3944  getrusage(RUSAGE_SELF, &rusage);
3945  return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
3946 #elif HAVE_GETPROCESSTIMES
3947  HANDLE proc;
3948  FILETIME c, e, k, u;
3949  proc = GetCurrentProcess();
3950  GetProcessTimes(proc, &c, &e, &k, &u);
3951  return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
3952 #else
3953  return av_gettime_relative();
3954 #endif
3955 }
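 /* Editorial note: GetProcessTimes() reports CPU times in 100-nanosecond FILETIME
  * units, so the 64-bit value assembled from dwHighDateTime/dwLowDateTime is divided
  * by 10 to obtain microseconds; the getrusage() branch builds the same microsecond
  * value from ru_utime.tv_sec * 1000000 + ru_utime.tv_usec. */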
3956 
3957 static int64_t getmaxrss(void)
3958 {
3959 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
3960  struct rusage rusage;
3961  getrusage(RUSAGE_SELF, &rusage);
3962  return (int64_t)rusage.ru_maxrss * 1024;
3963 #elif HAVE_GETPROCESSMEMORYINFO
3964  HANDLE proc;
3965  PROCESS_MEMORY_COUNTERS memcounters;
3966  proc = GetCurrentProcess();
3967  memcounters.cb = sizeof(memcounters);
3968  GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
3969  return memcounters.PeakPagefileUsage;
3970 #else
3971  return 0;
3972 #endif
3973 }
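 /* Editorial note: on Linux, getrusage() reports ru_maxrss in kilobytes, hence the
  * multiplication by 1024 to return bytes; the Windows branch returns
  * PeakPagefileUsage, which is already a byte count. */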
3974 
3975 static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
3976 {
3977 }
3978 
3979 int main(int argc, char **argv)
3980 {
3981  int ret;
3982  int64_t ti;
3983 
3984  register_exit(ffmpeg_cleanup);
3985 
3986  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
3987 
3988  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3989  parse_loglevel(argc, argv, options);
3990 
3991  if(argc>1 && !strcmp(argv[1], "-d")){
3992  run_as_daemon=1;
3993  av_log_set_callback(log_callback_null);
3994  argc--;
3995  argv++;
3996  }
3997 
3998  avcodec_register_all();
3999 #if CONFIG_AVDEVICE
4000  avdevice_register_all();
4001 #endif
4002  avfilter_register_all();
4003  av_register_all();
4004  avformat_network_init();
4005 
4006  show_banner(argc, argv, options);
4007 
4008  term_init();
4009 
4010  /* parse options and open all input/output files */
4011  ret = ffmpeg_parse_options(argc, argv);
4012  if (ret < 0)
4013  exit_program(1);
4014 
4015  if (nb_output_files <= 0 && nb_input_files == 0) {
4016  show_usage();
4017  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4018  exit_program(1);
4019  }
4020 
4021  /* file converter / grab */
4022  if (nb_output_files <= 0) {
4023  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4024  exit_program(1);
4025  }
4026 
4027 // if (nb_input_files == 0) {
4028 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4029 // exit_program(1);
4030 // }
4031 
4032  current_time = ti = getutime();
4033  if (transcode() < 0)
4034  exit_program(1);
4035  ti = getutime() - ti;
4036  if (do_benchmark) {
4037  printf("bench: utime=%0.3fs\n", ti / 1000000.0);
4038  }
4039  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4040  decode_error_stat[0], decode_error_stat[1]);
4041  if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
4042  exit_program(69);
4043 
4044  exit_program(received_nb_signals ? 255 : main_return_code);
4045  return main_return_code;
4046 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1412
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:265
#define extra_bits(eb)
Definition: intrax8.c:152
struct InputStream::@26 prev_sub
int got_output
Definition: ffmpeg.h:289
#define AV_DISPOSITION_METADATA
Definition: avformat.h:779
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2575
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1690
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:971
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:1790
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:380
const struct AVCodec * codec
Definition: avcodec.h:1248
Definition: ffmpeg.h:359
AVRational framerate
Definition: avcodec.h:3015
enum HWAccelID active_hwaccel_id
Definition: ffmpeg.h:315
const char * s
Definition: avisynth_c.h:669
Bytestream IO Context.
Definition: avio.h:68
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:457
void term_init(void)
Definition: ffmpeg.c:327
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:281
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
int64_t av_frame_get_pkt_duration(const AVFrame *frame)
uint8_t * name
Definition: ffmpeg.h:225
int nb_outputs
Definition: ffmpeg.h:241
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3441
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
AVDictionary * swr_opts
Definition: ffmpeg.h:427
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:251
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2029
int resample_channels
Definition: ffmpeg.h:284
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:163
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
void term_exit(void)
Definition: ffmpeg.c:306
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: utils.c:2915
int stream_copy
Definition: ffmpeg.h:433
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:880
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3469
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1461
#define FF_DEBUG_VIS_QP
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2585
AVOption.
Definition: opt.h:255
AVRational frame_rate
Definition: ffmpeg.h:399
int64_t * forced_kf_pts
Definition: ffmpeg.h:406
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:286
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2661
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:422
#define CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:735
static int process_input(int file_index)
Definition: ffmpeg.c:3500
#define CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:880
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:398
int exit_on_error
Definition: ffmpeg_opt.c:99
const char * fmt
Definition: avisynth_c.h:670
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:181
#define LIBAVUTIL_VERSION_INT
Definition: version.h:62
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1185
Keep a reference to the frame.
Definition: buffersrc.h:62
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
#define CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:734
static int run_as_daemon
Definition: ffmpeg.c:124
Memory buffer source API.
AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:2743
void av_log_set_level(int level)
Set the log level.
Definition: log.c:368
AVRational framerate
Definition: ffmpeg.h:274
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
AVCodecParserContext * parser
Definition: ffmpeg.h:441
static int64_t cur_time
Definition: ffserver.c:253
FILE * av_fopen_utf8(const char *path, const char *mode)
Open a file using a UTF-8 filename.
Definition: file_open.c:92
int decoding_needed
Definition: ffmpeg.h:249
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:867
int num
numerator
Definition: rational.h:44
FilterGraph * init_simple_filtergraph(InputStream *ist, OutputStream *ost)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1421
#define vsnprintf
Definition: snprintf.h:36
int index
stream index in AVFormatContext
Definition: avformat.h:796
int size
Definition: avcodec.h:1161
static int64_t getmaxrss(void)
Definition: ffmpeg.c:3957
const char * b
Definition: vf_curves.c:109
static int nb_frames_dup
Definition: ffmpeg.c:125
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2431
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:369
#define AV_DISPOSITION_DUB
Definition: avformat.h:751
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1621
int eagain
Definition: ffmpeg.h:336
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1115
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1442
AVBitStreamFilterContext * bitstream_filters
Definition: ffmpeg.h:390
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:555
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:763
unsigned num_rects
Definition: avcodec.h:3498
AVFrame * filter_frame
Definition: ffmpeg.h:256
static int transcode_init(void)
Definition: ffmpeg.c:2563
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2438
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2500
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
int do_benchmark_all
Definition: ffmpeg_opt.c:92
enum AVMediaType type
Definition: avcodec.h:3186
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:765
#define FF_ARRAY_ELEMS(a)
static int init_input_threads(void)
Definition: ffmpeg.c:3420
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:652
discard all
Definition: avcodec.h:667
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:907
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:318
int64_t input_ts_offset
Definition: ffmpeg.h:338
int do_hex_dump
Definition: ffmpeg_opt.c:93
static AVPacket pkt
int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of audio.
Definition: utils.c:1821
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2725
#define FF_DEBUG_VIS_MB_TYPE
only access through AVOptions from outside libavcodec
Definition: avcodec.h:2586
int nb_input_streams
Definition: ffmpeg.c:137
void avcodec_register_all(void)
Register all the codecs, parsers and bitstream filters which were enabled at configuration time...
Definition: allcodecs.c:68
const char * name
Definition: ffmpeg.h:69
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2713
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:2507
Picture data structure.
Definition: avcodec.h:3439
uint64_t packets_written
Definition: ffmpeg.h:447
AVCodec.
Definition: avcodec.h:3173
#define VSYNC_VFR
Definition: ffmpeg.h:54
int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:180
int block_align
number of bytes per packet if constant and known or 0 Used by some WAV based audio codecs...
Definition: avcodec.h:2020
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:436
int avpicture_fill(AVPicture *picture, const uint8_t *ptr, enum AVPixelFormat pix_fmt, int width, int height)
Setup the picture fields based on the specified image parameters and the provided image data buffer...
Definition: avpicture.c:34
int print_stats
Definition: ffmpeg_opt.c:100
float dts_error_threshold
Definition: ffmpeg_opt.c:84
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:458
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:232
uint64_t data_size
Definition: ffmpeg.h:445
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:418
static int process_input_packet(InputStream *ist, const AVPacket *pkt)
Definition: ffmpeg.c:2150
#define log2(x)
Definition: libm.h:122
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:755
struct FilterGraph * graph
Definition: ffmpeg.h:217
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1367
AVSubtitleRect ** rects
Definition: avcodec.h:3499
enum AVAudioServiceType audio_service_type
Type of service that the audio stream conveys.
Definition: avcodec.h:2058
int encoding_needed
Definition: ffmpeg.h:379
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:560
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:3975
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3260
Format I/O context.
Definition: avformat.h:1214
uint64_t samples_decoded
Definition: ffmpeg.h:330
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:216
unsigned int nb_stream_indexes
Definition: avformat.h:1154
#define AV_LOG_QUIET
Print no output.
Definition: log.h:157
enum HWAccelID id
Definition: ffmpeg.h:71
#define CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:744
int64_t cur_dts
Definition: avformat.h:972
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3471
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:71
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
uint64_t frames_decoded
Definition: ffmpeg.h:329
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:235
Public dictionary API.
static void do_video_out(AVFormatContext *s, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:882
char * logfile_prefix
Definition: ffmpeg.h:417
static uint8_t * subtitle_out
Definition: ffmpeg.c:132
#define DEFAULT_PASS_LOGFILENAME_PREFIX
Definition: ffmpeg.c:134
static int main_return_code
Definition: ffmpeg.c:315
static int64_t start_time
Definition: ffplay.c:319
int copy_initial_nonkeyframes
Definition: ffmpeg.h:435
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:116
if()
Definition: avfilter.c:975
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1991
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT
Definition: avformat.h:485
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
Opaque data information usually continuous.
Definition: avutil.h:196
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
static void * input_thread(void *arg)
Definition: ffmpeg.c:3358
int av_parser_change(AVCodecParserContext *s, AVCodecContext *avctx, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Definition: parser.c:186
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:209
AVOptions.
int subtitle_header_size
Definition: avcodec.h:2950
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:595
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
attribute_deprecated void(* destruct)(struct AVPacket *)
Definition: avcodec.h:1181
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3440
int stdin_interaction
Definition: ffmpeg_opt.c:102
FILE * logfile
Definition: ffmpeg.h:418
AVDictionary * opts
Definition: ffmpeg.h:455
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
#define media_type_string
Definition: cmdutils.h:577
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
libavcodec/libavfilter gluing utilities
#define ECHO(name, type, min, max)
Definition: af_aecho.c:182
static const HWAccel * get_hwaccel(enum AVPixelFormat pix_fmt)
Definition: ffmpeg.c:2327
static int need_output(void)
Definition: ffmpeg.c:3212
int last_droped
Definition: ffmpeg.h:396
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:249
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:911
static double psnr(double d)
Definition: ffmpeg.c:1161
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1353
int do_benchmark
Definition: ffmpeg_opt.c:91
int audio_sync_method
Definition: ffmpeg_opt.c:87
int shortest
Definition: ffmpeg.h:461
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1282
int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
Encode a frame of video.
Definition: utils.c:2085
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
static int64_t getutime(void)
Definition: ffmpeg.c:3939
static AVFrame * frame
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:111
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:94
const char * name
Definition: avcodec.h:5067
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
int nb_streams
Definition: ffmpeg.h:343
pthread_t thread
Definition: ffmpeg.h:351
uint8_t * data
Definition: avcodec.h:1160
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * resample_opts
Definition: ffmpeg.h:428
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:2444
list ifile
Definition: normalize.py:6
#define FFMIN3(a, b, c)
Definition: common.h:67
AVFilterContext * filter
Definition: ffmpeg.h:222
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4109
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
int nb_input_files
Definition: ffmpeg.c:139
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:363
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:191
#define lrintf(x)
Definition: libm_mips.h:70
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:819
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1166
int resample_sample_rate
Definition: ffmpeg.h:283
uint8_t * data
Definition: avcodec.h:1110
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:359
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:757
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:483
const AVClass * avcodec_get_frame_class(void)
Get the AVClass for AVFrame.
Definition: options.c:283
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
ptrdiff_t size
Definition: opengl_enc.c:101
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3472
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:273
int bits_per_coded_sample
bits per sample/pixel from the demuxer (needed for huffyuv).
Definition: avcodec.h:2718
AVCodec * dec
Definition: ffmpeg.h:254
static int64_t duration
Definition: ffplay.c:320
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1152
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2492
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:177
int top_field_first
Definition: ffmpeg.h:275
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1342
int nb_output_streams
Definition: ffmpeg.c:142
int file_index
Definition: ffmpeg.h:245
int duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1178
const OptionDef options[]
Definition: ffserver.c:3749
struct AVBitStreamFilterContext * next
Definition: avcodec.h:5062
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1965
unsigned int * stream_index
Definition: avformat.h:1153
struct InputStream::sub2video sub2video
int resample_pix_fmt
Definition: ffmpeg.h:280
int resample_height
Definition: ffmpeg.h:278
int wrap_correction_done
Definition: ffmpeg.h:266
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:268
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:257
int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:777
unsigned m
Definition: audioconvert.c:187
int av_buffersrc_add_ref(AVFilterContext *buffer_src, AVFilterBufferRef *picref, int flags)
Add buffer data in picref to buffer_src.
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:117
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1233
int64_t next_dts
Definition: ffmpeg.h:261
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1206
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
void av_buffer_default_free(void *opaque, uint8_t *data)
Default free callback, which calls av_free() on the buffer data.
Definition: buffer.c:61
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:428
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:51
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2826
static volatile int transcode_init_done
Definition: ffmpeg.c:314
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3187
int rate_emu
Definition: ffmpeg.h:346
int width
width and height of the video frame
Definition: frame.h:212
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:175
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1531
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1202
static void reset_eagain(void)
Definition: ffmpeg.c:3484
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
Definition: ffmpeg.c:586
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2336
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:320
void * av_opt_ptr(const AVClass *class, void *obj, const char *name)
Gets a pointer to the requested field in a struct.
Definition: opt.c:1548
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:571
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:2746
FilterGraph ** filtergraphs
Definition: ffmpeg.c:146
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:423
AVFilterContext * filter
Definition: ffmpeg.h:215
#define CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:872
#define CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:761
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:317
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:427
int64_t start
Definition: ffmpeg.h:258
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:822
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3470
av_default_item_name
uint64_t nb_packets
Definition: ffmpeg.h:327
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:389
int video_sync_method
Definition: ffmpeg_opt.c:88
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:127
#define VSYNC_VSCFR
Definition: ffmpeg.h:55
int avfilter_link_get_channels(AVFilterLink *link)
Get the number of channels of a link.
Definition: avfilter.c:175
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
char * sdp_filename
Definition: ffmpeg_opt.c:80
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:2089
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
const char * r
Definition: vf_curves.c:107
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:111
int capabilities
Codec capabilities.
Definition: avcodec.h:3192
int initial_padding
Audio only.
Definition: avcodec.h:3007
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:121
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:196
void av_bitstream_filter_close(AVBitStreamFilterContext *bsf)
Release bitstream filter context.
unsigned int nb_programs
Definition: avformat.h:1362
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:194
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:419
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1143
const char * arg
Definition: jacosubdec.c:66
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1333
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:490
AVChapter ** chapters
Definition: avformat.h:1413
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:333
int rc_max_rate
maximum bitrate
Definition: avcodec.h:2325
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:123
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1112
int av_log_get_level(void)
Get the current log level.
Definition: log.c:363
const char * name
Name of the codec implementation.
Definition: avcodec.h:3180
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:736
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:70
int side_data_elems
Definition: avcodec.h:1172
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:537
int force_fps
Definition: ffmpeg.h:400
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:878
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1149
#define FFMAX(a, b)
Definition: common.h:64
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
int qp_hist
Definition: ffmpeg_opt.c:101
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
float frame_drop_threshold
Definition: ffmpeg_opt.c:89
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1166
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:2877
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2044
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
uint32_t end_display_time
Definition: avcodec.h:3497
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3500
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:814
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:2302
OutputFilter * filter
Definition: ffmpeg.h:420
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:419
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVRational frame_aspect_ratio
Definition: ffmpeg.h:403
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:754
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1483
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:762
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:628
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1270
static int nb_frames_drop
Definition: ffmpeg.c:126
A bitmap, pict will be set.
Definition: avcodec.h:3451
int nb_output_files
Definition: ffmpeg.c:144
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:117
int bit_rate
the average bitrate
Definition: avcodec.h:1303
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:197
audio channel layout utility functions
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:234
static int transcode(void)
Definition: ffmpeg.c:3818
char filename[1024]
input or output filename
Definition: avformat.h:1290
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3479
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:127
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:247
#define FFMIN(a, b)
Definition: common.h:66
float y
#define VSYNC_AUTO
Definition: ffmpeg.h:51
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:383
int saw_first_ts
Definition: ffmpeg.h:271
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1810
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that's been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:78
#define FFSIGN(a)
Definition: common.h:62
struct OutputStream * ost
Definition: ffmpeg.h:223
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1412
PVOID HANDLE
char * apad
Definition: ffmpeg.h:430
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:411
const char * name
Definition: avformat.h:466
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
void av_parser_close(AVCodecParserContext *s)
Definition: parser.c:221
int nb_filtergraphs
Definition: ffmpeg.c:147
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:78
int64_t last_ts
Definition: ffmpeg.h:340
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3447
#define FFABS(a)
Definition: common.h:61
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:275
int do_pkt_dump
Definition: ffmpeg_opt.c:94
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2482
int64_t max_frames
Definition: ffmpeg.h:393
#define AV_RL32
Definition: intreadwrite.h:146
#define CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:754
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:319
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:298
float u
int audio_channels_mapped
Definition: ffmpeg.h:415
int n
Definition: avisynth_c.h:589
AVDictionary * metadata
Definition: avformat.h:869
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1376
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:963
static int reap_filters(void)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1221
Opaque data information usually sparse.
Definition: avutil.h:198
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:106
static int restore_tty
Definition: ffmpeg.c:153
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
static int got_eagain(void)
Definition: ffmpeg.c:3475
preferred ID for MPEG-1/2 video decoding
Definition: avcodec.h:107
static void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:222
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the nearest value in q_list to q.
Definition: rational.c:141
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:2953
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:121
int av_packet_split_side_data(AVPacket *pkt)
Definition: avpacket.c:381
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:764
int ret
Definition: ffmpeg.h:290
int audio_volume
Definition: ffmpeg_opt.c:86
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:795
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:425
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:224
InputFilter ** filters
Definition: ffmpeg.h:305
int fix_sub_duration
Definition: ffmpeg.h:287
#define VSYNC_DROP
Definition: ffmpeg.h:56
int64_t recording_time
Definition: ffmpeg.h:342
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4122
Definition: ffmpeg.h:68
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2003
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:64
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:750
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:165
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:2522
#define AV_LOG_INFO
Standard information.
Definition: log.h:186
int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, AVCodecContext *avctx, const char *args, uint8_t **poutbuf, int *poutbuf_size, const uint8_t *buf, int buf_size, int keyframe)
Filter bitstream.
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: frame.h:343
int frame_size
Definition: mxfenc.c:1618
attribute_deprecated void av_log_ask_for_sample(void *avc, const char *msg,...) av_printf_format(2
Log a generic warning message asking for a sample.
AVCodecParserContext * av_parser_init(int codec_id)
Definition: parser.c:50
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:778
int ost_index
Definition: ffmpeg.h:456
struct InputStream * sync_ist
Definition: ffmpeg.h:383
AVS_Value src
Definition: avisynth_c.h:524
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: utils.c:715
enum AVMediaType codec_type
Definition: avcodec.h:1247
double ts_scale
Definition: ffmpeg.h:270
int unavailable
Definition: ffmpeg.h:432
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
const AVRational * supported_framerates
array of supported framerates, or NULL if any, array is terminated by {0,0}
Definition: avcodec.h:3193
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:162
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2384
enum AVCodecID codec_id
Definition: avcodec.h:1256
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:312
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:253
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1478
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:180
float max_error_rate
Definition: ffmpeg_opt.c:104
int sample_rate
samples per second
Definition: avcodec.h:1983
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:191
uint64_t frames_encoded
Definition: ffmpeg.h:449
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
Definition: ffmpeg.c:1944
AVIOContext * pb
I/O context.
Definition: avformat.h:1256
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:166
int ist_index
Definition: ffmpeg.h:337
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:491
uint8_t flags
Definition: pixdesc.h:90
int debug
debug
Definition: avcodec.h:2563
static void print_sdp(void)
Definition: ffmpeg.c:2292
const char * graph_desc
Definition: ffmpeg.h:233
int guess_layout_max
Definition: ffmpeg.h:276
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:69
int64_t start_time
Definition: ffmpeg.h:341
#define AVFMT_RAWPICTURE
Format wants AVPicture structure for raw picture data.
Definition: avformat.h:421
main external API structure.
Definition: avcodec.h:1239
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:335
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:425
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:758
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2807
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:244
const char * attachment_filename
Definition: ffmpeg.h:434
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
Definition: avcodec.h:1271
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1673
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:546
AVCodecContext * enc_ctx
Definition: ffmpeg.h:391
void * buf
Definition: avisynth_c.h:595
AVFrame * decoded_frame
Definition: ffmpeg.h:255
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1354
Perform non-blocking operation.
Definition: threadmessage.h:31
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
Replacements for frequently missing libm functions.
struct AVBitStreamFilter * filter
Definition: avcodec.h:5060
AVCodecContext * dec_ctx
Definition: ffmpeg.h:253
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:3730
AVStream * st
Definition: ffmpeg.h:246
int * audio_channels_map
Definition: ffmpeg.h:414
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:52
Describe the class of an AVClass context structure.
Definition: log.h:66
int sample_rate
Sample rate of the audio data.
Definition: frame.h:414
int configure_filtergraph(FilterGraph *fg)
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1493
int av_frame_get_channels(const AVFrame *frame)
OutputStream ** output_streams
Definition: ffmpeg.c:141
int index
Definition: gxfenc.c:89
rational number numerator/denominator
Definition: rational.h:43
int file_index
Definition: ffmpeg.h:375
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:48
static int current_time
Definition: ffmpeg.c:129
int64_t sync_opts
Definition: ffmpeg.h:384
char * vstats_filename
Definition: ffmpeg_opt.c:79
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:150
char * disposition
Definition: ffmpeg.h:437
AVMediaType
Definition: avutil.h:192
discard useless packets like 0 size packets in avi
Definition: avcodec.h:662
static av_always_inline av_const long int lrint(double x)
Definition: libm.h:148
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:902
int nb_streams_warn
Definition: ffmpeg.h:345
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2244
AVDictionary * decoder_opts
Definition: ffmpeg.h:273
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:566
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1325
int showed_multi_packet_warning
Definition: ffmpeg.h:272
#define snprintf
Definition: snprintf.h:34
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:103
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:3596
int64_t ts_offset
Definition: ffmpeg.h:339
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:327
static void do_subtitle_out(AVFormatContext *s, OutputStream *ost, InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:798
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:3776
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:423
static void free_input_threads(void)
Definition: ffmpeg.c:3400
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:3313
misc parsing utilities
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
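A rough sketch of pushing a decoded frame into a configured filtergraph and draining its sink; buffersrc_ctx, buffersink_ctx and the two AVFrame pointers are assumed to be set up already:
/* feed one decoded frame into the graph's input */
if (av_buffersrc_add_frame(buffersrc_ctx, decoded_frame) < 0)
    return -1;
/* pull every frame the graph can currently produce */
while (av_buffersink_get_frame(buffersink_ctx, filtered_frame) >= 0) {
    /* ... encode or display filtered_frame ... */
    av_frame_unref(filtered_frame);
}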
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1469
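A minimal demuxing loop around av_read_frame(); ic and wanted_stream are placeholders, and av_free_packet() is the packet-release call of this release series:
AVPacket pkt;
av_init_packet(&pkt);
while (av_read_frame(ic, &pkt) >= 0) {
    if (pkt.stream_index == wanted_stream) {
        /* ... decode or remux pkt ... */
    }
    av_free_packet(&pkt);                      /* release the packet's buffers after use */
}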
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
Get the frame rate of the input.
Definition: buffersink.c:356
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:254
AVFrame * filtered_frame
Definition: ffmpeg.h:394
int source_index
Definition: ffmpeg.h:377
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:265
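A hedged example of allocating a writable video frame with av_frame_get_buffer(); the dimensions and pixel format are placeholders:
AVFrame *frame = av_frame_alloc();
frame->format = AV_PIX_FMT_YUV420P;
frame->width  = 1280;
frame->height = 720;
if (av_frame_get_buffer(frame, 32) < 0)        /* 32-byte alignment suits SIMD access */
    exit(1);
/* ... fill frame->data[] / frame->linesize[] ... */
av_frame_free(&frame);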
static volatile int received_nb_signals
Definition: ffmpeg.c:313
int copy_prior_start
Definition: ffmpeg.h:436
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:377
int cmdutils_read_file(const char *filename, char **bufptr, size_t *size)
Read the file with name filename, and put its content in a newly allocated 0-terminated buffer...
Definition: cmdutils.c:1863
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1319
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:577
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:72
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:84
int nb_filters
Definition: ffmpeg.h:306
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:133
static int flags
Definition: cpu.c:47
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2336
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1299
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:174
uint8_t level
Definition: svq3.c:150
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:410
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:262
int resample_sample_fmt
Definition: ffmpeg.h:282
int forced_kf_count
Definition: ffmpeg.h:407
int64_t start
Definition: avformat.h:1182
OSTFinished finished
Definition: ffmpeg.h:431
char * forced_keyframes
Definition: ffmpeg.h:409
uint64_t data_size
Definition: ffmpeg.h:325
uint8_t pi
Sample-format conversion helpers (CONV_FUNC_GROUP macro expansions; ff_audio_convert_alloc, ff_audio_convert).
#define CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:845
int resample_width
Definition: ffmpeg.h:279
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:264
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1017
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1171
struct FilterGraph * graph
Definition: ffmpeg.h:224
static void filter(MpegAudioContext *s, int ch, const short *samples, int incr)
uint64_t limit_filesize
Definition: ffmpeg.h:459
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1313
AVIOContext * progress_avio
Definition: ffmpeg.c:130
AVThreadMessageQueue * in_thread_queue
Definition: ffmpeg.h:350
int main(int argc, char **argv)
Definition: ffmpeg.c:3979
int reinit_filters
Definition: ffmpeg.h:308
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:418
#define VSYNC_CFR
Definition: ffmpeg.h:53
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:259
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:847
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:430
AVStream * st
Definition: muxing.c:54
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:858
static AVCodecContext * dec_ctx
uint32_t start_display_time
Definition: avcodec.h:3496
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:961
uint64_t samples_encoded
Definition: ffmpeg.h:450
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1181
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Definition: mem.c:206
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2780
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:856
char * key
Definition: dict.h:87
static FILE * vstats_file
Definition: ffmpeg.c:109
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:80
AVFrame * last_frame
Definition: ffmpeg.h:395
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int copy_ts
Definition: ffmpeg_opt.c:95
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1226
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3631
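For reference, the typical open/close bracket around an input context (a sketch; the input path is a placeholder and error handling is trimmed):
AVFormatContext *ic = NULL;
if (avformat_open_input(&ic, "input.mp4", NULL, NULL) < 0)
    return -1;
if (avformat_find_stream_info(ic, NULL) < 0) {
    avformat_close_input(&ic);                 /* also frees the context */
    return -1;
}
/* ... read packets with av_read_frame() ... */
avformat_close_input(&ic);                     /* ic is set to NULL afterwards */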
AVFormatContext * ctx
Definition: ffmpeg.h:334
static struct termios oldtty
Definition: ffmpeg.c:152
AVCodec * enc
Definition: ffmpeg.h:392
AVSubtitle subtitle
Definition: ffmpeg.h:291
int eof_reached
Definition: ffmpeg.h:335
int forced_kf_index
Definition: ffmpeg.h:408
static void do_audio_out(AVFormatContext *s, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:749
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:428
char * avfilter
Definition: ffmpeg.h:421
uint8_t * name
Definition: ffmpeg.h:218
char * value
Definition: dict.h:88
int top_field_first
If the content is interlaced, whether the top field is displayed first.
Definition: frame.h:364
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
#define NAN
Definition: math.h:28
float dts_delta_threshold
Definition: ffmpeg_opt.c:83
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:703
int channels
number of audio channels
Definition: avcodec.h:1984
#define av_log2
Definition: intmath.h:105
int top_field_first
Definition: ffmpeg.h:401
OutputFilter ** outputs
Definition: ffmpeg.h:240
InputFile ** input_files
Definition: ffmpeg.c:138
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2374
void av_log_set_flags(int arg)
Definition: log.c:373
int key_frame
1 -> keyframe, 0 -> not
Definition: frame.h:229
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:220
AVDictionary * bsf_args
Definition: ffmpeg.h:429
AVFormatContext * ctx
Definition: ffmpeg.h:454
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:753
void show_usage(void)
Definition: ffmpeg_opt.c:2696
An instance of a filter.
Definition: avfilter.h:633
#define LIBAVCODEC_IDENT
Definition: version.h:43
char * hwaccel_device
Definition: ffmpeg.h:312
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1159
AVDictionary * encoder_opts
Definition: ffmpeg.h:426
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:932
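A sketch of the muxing bracket that av_write_trailer() closes; oc is assumed to be a fully configured output context and get_encoded_packet() is a hypothetical packet source:
AVPacket pkt;
if (avformat_write_header(oc, NULL) < 0)       /* writes the container header */
    return -1;
while (get_encoded_packet(&pkt)) {
    av_interleaved_write_frame(oc, &pkt);      /* buffers and interleaves; lavf takes the packet data */
}
av_write_trailer(oc);                          /* flush buffered packets, write index/trailer */
if (!(oc->oformat->flags & AVFMT_NOFILE))
    avio_closep(&oc->pb);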
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:107
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:4250
int height
Definition: frame.h:212
InputFilter ** inputs
Definition: ffmpeg.h:238
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1980
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:321
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:581
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
uint8_t * av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:324
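A hedged example of probing packet side data; the side-data type shown is just one possibility:
int side_size = 0;
uint8_t *side = av_packet_get_side_data(&pkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
if (side && side_size > 0) {
    /* the stream's extradata changed mid-stream; a real consumer would update the decoder here */
}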
OutputFile ** output_files
Definition: ffmpeg.c:143
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:169
#define av_malloc_array(a, b)
static void flush_encoders(void)
Definition: ffmpeg.c:1595
int copy_tb
Definition: ffmpeg_opt.c:97
static volatile int received_sigterm
Definition: ffmpeg.c:312
#define FFSWAP(type, a, b)
Definition: common.h:69
int discard
Definition: ffmpeg.h:247
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:3455
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:1950
int thread_queue_size
Definition: ffmpeg.h:354
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:299
int stream_index
Definition: avcodec.h:1162
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:837
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:311
enum AVSubtitleType type
Definition: avcodec.h:3480
int64_t first_pts
Definition: ffmpeg.h:387
int nb_inputs
Definition: ffmpeg.h:239
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:860
int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:223
#define DECODING_FOR_OST
Definition: ffmpeg.h:250
int index
Definition: ffmpeg.h:376
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1015
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
uint64_t resample_channel_layout
Definition: ffmpeg.h:285
OSTFinished
Definition: ffmpeg.h:369
This structure stores compressed data.
Definition: avcodec.h:1137
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:937
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: utils.c:2171
int non_blocking
Definition: ffmpeg.h:352
int delay
Codec delay.
Definition: avcodec.h:1400
int debug_ts
Definition: ffmpeg_opt.c:98
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3242
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:217
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
static void sigterm_handler(int sig)
Definition: ffmpeg.c:318
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1153
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:116
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1296
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:65
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1363
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:752
int joined
Definition: ffmpeg.h:353
Immediately push the frame to the output.
Definition: buffersrc.h:55
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
InputStream ** input_streams
Definition: ffmpeg.c:136
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:66
Definition: ffmpeg.h:363
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:725
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:2949