FFmpeg 2.6.3
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/colorspace.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avcodec.h"
52 # include "libavfilter/avfilter.h"
53 # include "libavfilter/buffersink.h"
54 # include "libavfilter/buffersrc.h"
55 #endif
56 
57 #include <SDL.h>
58 #include <SDL_thread.h>
59 
60 #include "cmdutils.h"
61 
62 #include <assert.h>
63 
64 const char program_name[] = "ffplay";
65 const int program_birth_year = 2003;
66 
67 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
68 #define MIN_FRAMES 5
69 
70 /* Minimum SDL audio buffer size, in samples. */
71 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
72 /* Calculate actual buffer size, keeping in mind not to cause too frequent audio callbacks */
73 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
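/* Editor's sketch (the actual computation lives in audio_open(), outside this excerpt):
 * the SDL buffer size is derived roughly as
 *     samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE,
 *                     2 << av_log2(freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
 * i.e. a power of two large enough that the callback rate stays at or below
 * SDL_AUDIO_MAX_CALLBACKS_PER_SEC for the given sample rate. */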
74 
75 /* no AV sync correction is done if below the minimum AV sync threshold */
76 #define AV_SYNC_THRESHOLD_MIN 0.04
77 /* AV sync correction is done if above the maximum AV sync threshold */
78 #define AV_SYNC_THRESHOLD_MAX 0.1
80 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
80 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
82 /* no AV correction is done if the error is too big */
82 #define AV_NOSYNC_THRESHOLD 10.0
83 
84 /* maximum audio speed change to get correct sync */
85 #define SAMPLE_CORRECTION_PERCENT_MAX 10
86 
87 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
88 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
89 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
90 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
91 
92 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
93 #define AUDIO_DIFF_AVG_NB 20
94 
95 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
96 #define REFRESH_RATE 0.01
97 
98 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
99 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
100 #define SAMPLE_ARRAY_SIZE (8 * 65536)
101 
102 #define CURSOR_HIDE_DELAY 1000000
103 
104 static int64_t sws_flags = SWS_BICUBIC;
105 
106 typedef struct MyAVPacketList {
107  AVPacket pkt;
108  struct MyAVPacketList *next;
109  int serial;
110 } MyAVPacketList;
111 
112 typedef struct PacketQueue {
113  MyAVPacketList *first_pkt, *last_pkt;
114  int nb_packets;
115  int size;
116  int abort_request;
117  int serial;
118  SDL_mutex *mutex;
119  SDL_cond *cond;
120 } PacketQueue;
121 
122 #define VIDEO_PICTURE_QUEUE_SIZE 3
123 #define SUBPICTURE_QUEUE_SIZE 16
124 #define SAMPLE_QUEUE_SIZE 9
125 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
126 
127 typedef struct AudioParams {
128  int freq;
129  int channels;
130  int64_t channel_layout;
131  enum AVSampleFormat fmt;
132  int frame_size;
133  int bytes_per_sec;
134 } AudioParams;
135 
136 typedef struct Clock {
137  double pts; /* clock base */
138  double pts_drift; /* clock base minus time at which we updated the clock */
139  double last_updated;
140  double speed;
141  int serial; /* clock is based on a packet with this serial */
142  int paused;
143  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
144 } Clock;
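/* Editor's illustration (grounded in set_clock_at()/get_clock() below): set_clock_at()
 * stores pts_drift = pts - time, so while the clock runs at speed 1.0 the current value
 * is simply pts_drift + now; for other speeds the elapsed time since last_updated is
 * scaled:
 *     get_clock(c) = pts_drift + now - (now - last_updated) * (1.0 - speed)
 * e.g. pts = 10.0 set at time 100.0 with speed 1.0: at time 100.5 the clock reads
 * (10.0 - 100.0) + 100.5 = 10.5. */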
145 
146 /* Common struct for handling all types of decoded data and allocated render buffers. */
147 typedef struct Frame {
148  AVFrame *frame;
149  AVSubtitle sub;
150  int serial;
151  double pts; /* presentation timestamp for the frame */
152  double duration; /* estimated duration of the frame */
153  int64_t pos; /* byte position of the frame in the input file */
154  SDL_Overlay *bmp;
155  int allocated;
156  int reallocate;
157  int width;
158  int height;
159  AVRational sar;
160 } Frame;
161 
162 typedef struct FrameQueue {
163  Frame queue[FRAME_QUEUE_SIZE];
164  int rindex;
165  int windex;
166  int size;
167  int max_size;
168  int keep_last;
169  int rindex_shown;
170  SDL_mutex *mutex;
171  SDL_cond *cond;
172  PacketQueue *pktq;
173 } FrameQueue;
174 
175 enum {
176  AV_SYNC_AUDIO_MASTER, /* default choice */
177  AV_SYNC_VIDEO_MASTER,
178  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
179 };
180 
181 typedef struct Decoder {
182  AVPacket pkt;
183  AVPacket pkt_temp;
184  PacketQueue *queue;
185  AVCodecContext *avctx;
186  int pkt_serial;
187  int finished;
188  int packet_pending;
189  SDL_cond *empty_queue_cond;
190  int64_t start_pts;
191  AVRational start_pts_tb;
192  int64_t next_pts;
193  AVRational next_pts_tb;
194  SDL_Thread *decoder_tid;
195 } Decoder;
196 
197 typedef struct VideoState {
198  SDL_Thread *read_tid;
202  int paused;
205  int seek_req;
207  int64_t seek_pos;
208  int64_t seek_rel;
211  int realtime;
212 
216 
220 
224 
226 
228 
229  double audio_clock;
231  double audio_diff_cum; /* used for AV difference average computation */
241  unsigned int audio_buf_size; /* in bytes */
242  unsigned int audio_buf1_size;
243  int audio_buf_index; /* in bytes */
246 #if CONFIG_AVFILTER
248 #endif
253 
254  enum ShowMode {
256  } show_mode;
263  int xpos;
265 
269 
270  double frame_timer;
276  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
277 #if !CONFIG_AVFILTER
278  struct SwsContext *img_convert_ctx;
279 #endif
281  int eof;
282 
283  char filename[1024];
285  int step;
286 
287 #if CONFIG_AVFILTER
289  AVFilterContext *in_video_filter; // the first filter in the video chain
290  AVFilterContext *out_video_filter; // the last filter in the video chain
291  AVFilterContext *in_audio_filter; // the first filter in the audio chain
292  AVFilterContext *out_audio_filter; // the last filter in the audio chain
293  AVFilterGraph *agraph; // audio filter graph
294 #endif
295 
297 
299 } VideoState;
300 
301 /* options specified by the user */
303 static const char *input_filename;
304 static const char *window_title;
305 static int fs_screen_width;
306 static int fs_screen_height;
307 static int default_width = 640;
308 static int default_height = 480;
309 static int screen_width = 0;
310 static int screen_height = 0;
311 static int audio_disable;
312 static int video_disable;
313 static int subtitle_disable;
314 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
315 static int seek_by_bytes = -1;
316 static int display_disable;
317 static int show_status = 1;
319 static int64_t start_time = AV_NOPTS_VALUE;
320 static int64_t duration = AV_NOPTS_VALUE;
321 static int fast = 0;
322 static int genpts = 0;
323 static int lowres = 0;
324 static int decoder_reorder_pts = -1;
325 static int autoexit;
326 static int exit_on_keydown;
327 static int exit_on_mousedown;
328 static int loop = 1;
329 static int framedrop = -1;
330 static int infinite_buffer = -1;
331 static enum ShowMode show_mode = SHOW_MODE_NONE;
332 static const char *audio_codec_name;
333 static const char *subtitle_codec_name;
334 static const char *video_codec_name;
335 double rdftspeed = 0.02;
336 static int64_t cursor_last_shown;
337 static int cursor_hidden = 0;
338 #if CONFIG_AVFILTER
339 static const char **vfilters_list = NULL;
340 static int nb_vfilters = 0;
341 static char *afilters = NULL;
342 #endif
343 static int autorotate = 1;
344 
345 /* current context */
346 static int is_full_screen;
347 static int64_t audio_callback_time;
348 
349 static AVPacket flush_pkt;
350 
351 #define FF_ALLOC_EVENT (SDL_USEREVENT)
352 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
353 
354 static SDL_Surface *screen;
355 
356 #if CONFIG_AVFILTER
357 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
358 {
359  GROW_ARRAY(vfilters_list, nb_vfilters);
360  vfilters_list[nb_vfilters - 1] = arg;
361  return 0;
362 }
363 #endif
364 
365 static inline
366 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
367  enum AVSampleFormat fmt2, int64_t channel_count2)
368 {
369  /* If channel count == 1, planar and non-planar formats are the same */
370  if (channel_count1 == 1 && channel_count2 == 1)
371  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
372  else
373  return channel_count1 != channel_count2 || fmt1 != fmt2;
374 }
375 
376 static inline
377 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
378 {
379  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
380  return channel_layout;
381  else
382  return 0;
383 }
384 
385 static void free_picture(Frame *vp);
386 
387 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
388 {
389  MyAVPacketList *pkt1;
390 
391  if (q->abort_request)
392  return -1;
393 
394  pkt1 = av_malloc(sizeof(MyAVPacketList));
395  if (!pkt1)
396  return -1;
397  pkt1->pkt = *pkt;
398  pkt1->next = NULL;
399  if (pkt == &flush_pkt)
400  q->serial++;
401  pkt1->serial = q->serial;
402 
403  if (!q->last_pkt)
404  q->first_pkt = pkt1;
405  else
406  q->last_pkt->next = pkt1;
407  q->last_pkt = pkt1;
408  q->nb_packets++;
409  q->size += pkt1->pkt.size + sizeof(*pkt1);
410  /* XXX: should duplicate packet data in DV case */
411  SDL_CondSignal(q->cond);
412  return 0;
413 }
414 
415 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
416 {
417  int ret;
418 
419  /* duplicate the packet */
420  if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
421  return -1;
422 
423  SDL_LockMutex(q->mutex);
424  ret = packet_queue_put_private(q, pkt);
425  SDL_UnlockMutex(q->mutex);
426 
427  if (pkt != &flush_pkt && ret < 0)
428  av_free_packet(pkt);
429 
430  return ret;
431 }
432 
433 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
434 {
435  AVPacket pkt1, *pkt = &pkt1;
436  av_init_packet(pkt);
437  pkt->data = NULL;
438  pkt->size = 0;
439  pkt->stream_index = stream_index;
440  return packet_queue_put(q, pkt);
441 }
442 
443 /* packet queue handling */
444 static void packet_queue_init(PacketQueue *q)
445 {
446  memset(q, 0, sizeof(PacketQueue));
447  q->mutex = SDL_CreateMutex();
448  q->cond = SDL_CreateCond();
449  q->abort_request = 1;
450 }
451 
452 static void packet_queue_flush(PacketQueue *q)
453 {
454  MyAVPacketList *pkt, *pkt1;
455 
456  SDL_LockMutex(q->mutex);
457  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
458  pkt1 = pkt->next;
459  av_free_packet(&pkt->pkt);
460  av_freep(&pkt);
461  }
462  q->last_pkt = NULL;
463  q->first_pkt = NULL;
464  q->nb_packets = 0;
465  q->size = 0;
466  SDL_UnlockMutex(q->mutex);
467 }
468 
469 static void packet_queue_destroy(PacketQueue *q)
470 {
471  packet_queue_flush(q);
472  SDL_DestroyMutex(q->mutex);
473  SDL_DestroyCond(q->cond);
474 }
475 
476 static void packet_queue_abort(PacketQueue *q)
477 {
478  SDL_LockMutex(q->mutex);
479 
480  q->abort_request = 1;
481 
482  SDL_CondSignal(q->cond);
483 
484  SDL_UnlockMutex(q->mutex);
485 }
486 
487 static void packet_queue_start(PacketQueue *q)
488 {
489  SDL_LockMutex(q->mutex);
490  q->abort_request = 0;
491  packet_queue_put_private(q, &flush_pkt);
492  SDL_UnlockMutex(q->mutex);
493 }
494 
495 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
496 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
497 {
498  MyAVPacketList *pkt1;
499  int ret;
500 
501  SDL_LockMutex(q->mutex);
502 
503  for (;;) {
504  if (q->abort_request) {
505  ret = -1;
506  break;
507  }
508 
509  pkt1 = q->first_pkt;
510  if (pkt1) {
511  q->first_pkt = pkt1->next;
512  if (!q->first_pkt)
513  q->last_pkt = NULL;
514  q->nb_packets--;
515  q->size -= pkt1->pkt.size + sizeof(*pkt1);
516  *pkt = pkt1->pkt;
517  if (serial)
518  *serial = pkt1->serial;
519  av_free(pkt1);
520  ret = 1;
521  break;
522  } else if (!block) {
523  ret = 0;
524  break;
525  } else {
526  SDL_CondWait(q->cond, q->mutex);
527  }
528  }
529  SDL_UnlockMutex(q->mutex);
530  return ret;
531 }
532 
533 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
534  memset(d, 0, sizeof(Decoder));
535  d->avctx = avctx;
536  d->queue = queue;
537  d->empty_queue_cond = empty_queue_cond;
538  d->start_pts = AV_NOPTS_VALUE;
539 }
540 
541 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
542  int got_frame = 0;
543 
544  do {
545  int ret = -1;
546 
547  if (d->queue->abort_request)
548  return -1;
549 
550  if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
551  AVPacket pkt;
552  do {
553  if (d->queue->nb_packets == 0)
554  SDL_CondSignal(d->empty_queue_cond);
555  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
556  return -1;
557  if (pkt.data == flush_pkt.data) {
558  avcodec_flush_buffers(d->avctx);
559  d->finished = 0;
560  d->next_pts = d->start_pts;
561  d->next_pts_tb = d->start_pts_tb;
562  }
563  } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
564  av_free_packet(&d->pkt);
565  d->pkt_temp = d->pkt = pkt;
566  d->packet_pending = 1;
567  }
568 
569  switch (d->avctx->codec_type) {
570  case AVMEDIA_TYPE_VIDEO:
571  ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
572  if (got_frame) {
573  if (decoder_reorder_pts == -1) {
574  frame->pts = av_frame_get_best_effort_timestamp(frame);
575  } else if (decoder_reorder_pts) {
576  frame->pts = frame->pkt_pts;
577  } else {
578  frame->pts = frame->pkt_dts;
579  }
580  }
581  break;
582  case AVMEDIA_TYPE_AUDIO:
583  ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
584  if (got_frame) {
585  AVRational tb = (AVRational){1, frame->sample_rate};
586  if (frame->pts != AV_NOPTS_VALUE)
587  frame->pts = av_rescale_q(frame->pts, d->avctx->time_base, tb);
588  else if (frame->pkt_pts != AV_NOPTS_VALUE)
589  frame->pts = av_rescale_q(frame->pkt_pts, av_codec_get_pkt_timebase(d->avctx), tb);
590  else if (d->next_pts != AV_NOPTS_VALUE)
591  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
592  if (frame->pts != AV_NOPTS_VALUE) {
593  d->next_pts = frame->pts + frame->nb_samples;
594  d->next_pts_tb = tb;
595  }
596  }
597  break;
598  case AVMEDIA_TYPE_SUBTITLE:
599  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
600  break;
601  }
602 
603  if (ret < 0) {
604  d->packet_pending = 0;
605  } else {
606  d->pkt_temp.dts =
607  d->pkt_temp.pts = AV_NOPTS_VALUE;
608  if (d->pkt_temp.data) {
609  if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
610  ret = d->pkt_temp.size;
611  d->pkt_temp.data += ret;
612  d->pkt_temp.size -= ret;
613  if (d->pkt_temp.size <= 0)
614  d->packet_pending = 0;
615  } else {
616  if (!got_frame) {
617  d->packet_pending = 0;
618  d->finished = d->pkt_serial;
619  }
620  }
621  }
622  } while (!got_frame && !d->finished);
623 
624  return got_frame;
625 }
626 
627 static void decoder_destroy(Decoder *d) {
628  av_free_packet(&d->pkt);
629 }
630 
631 static void frame_queue_unref_item(Frame *vp)
632 {
633  av_frame_unref(vp->frame);
634  avsubtitle_free(&vp->sub);
635 }
636 
637 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
638 {
639  int i;
640  memset(f, 0, sizeof(FrameQueue));
641  if (!(f->mutex = SDL_CreateMutex()))
642  return AVERROR(ENOMEM);
643  if (!(f->cond = SDL_CreateCond()))
644  return AVERROR(ENOMEM);
645  f->pktq = pktq;
646  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
647  f->keep_last = !!keep_last;
648  for (i = 0; i < f->max_size; i++)
649  if (!(f->queue[i].frame = av_frame_alloc()))
650  return AVERROR(ENOMEM);
651  return 0;
652 }
653 
654 static void frame_queue_destory(FrameQueue *f)
655 {
656  int i;
657  for (i = 0; i < f->max_size; i++) {
658  Frame *vp = &f->queue[i];
659  frame_queue_unref_item(vp);
660  av_frame_free(&vp->frame);
661  free_picture(vp);
662  }
663  SDL_DestroyMutex(f->mutex);
664  SDL_DestroyCond(f->cond);
665 }
666 
667 static void frame_queue_signal(FrameQueue *f)
668 {
669  SDL_LockMutex(f->mutex);
670  SDL_CondSignal(f->cond);
671  SDL_UnlockMutex(f->mutex);
672 }
673 
674 static Frame *frame_queue_peek(FrameQueue *f)
675 {
676  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
677 }
678 
679 static Frame *frame_queue_peek_next(FrameQueue *f)
680 {
681  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
682 }
683 
684 static Frame *frame_queue_peek_last(FrameQueue *f)
685 {
686  return &f->queue[f->rindex];
687 }
688 
689 static Frame *frame_queue_peek_writable(FrameQueue *f)
690 {
691  /* wait until we have space to put a new frame */
692  SDL_LockMutex(f->mutex);
693  while (f->size >= f->max_size &&
694  !f->pktq->abort_request) {
695  SDL_CondWait(f->cond, f->mutex);
696  }
697  SDL_UnlockMutex(f->mutex);
698 
699  if (f->pktq->abort_request)
700  return NULL;
701 
702  return &f->queue[f->windex];
703 }
704 
705 static Frame *frame_queue_peek_readable(FrameQueue *f)
706 {
707  /* wait until we have a readable new frame */
708  SDL_LockMutex(f->mutex);
709  while (f->size - f->rindex_shown <= 0 &&
710  !f->pktq->abort_request) {
711  SDL_CondWait(f->cond, f->mutex);
712  }
713  SDL_UnlockMutex(f->mutex);
714 
715  if (f->pktq->abort_request)
716  return NULL;
717 
718  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
719 }
720 
721 static void frame_queue_push(FrameQueue *f)
722 {
723  if (++f->windex == f->max_size)
724  f->windex = 0;
725  SDL_LockMutex(f->mutex);
726  f->size++;
727  SDL_CondSignal(f->cond);
728  SDL_UnlockMutex(f->mutex);
729 }
730 
731 static void frame_queue_next(FrameQueue *f)
732 {
733  if (f->keep_last && !f->rindex_shown) {
734  f->rindex_shown = 1;
735  return;
736  }
737  frame_queue_unref_item(&f->queue[f->rindex]);
738  if (++f->rindex == f->max_size)
739  f->rindex = 0;
740  SDL_LockMutex(f->mutex);
741  f->size--;
742  SDL_CondSignal(f->cond);
743  SDL_UnlockMutex(f->mutex);
744 }
745 
746 /* jump back to the previous frame if available by resetting rindex_shown */
747 static int frame_queue_prev(FrameQueue *f)
748 {
749  int ret = f->rindex_shown;
750  f->rindex_shown = 0;
751  return ret;
752 }
753 
754 /* return the number of undisplayed frames in the queue */
755 static int frame_queue_nb_remaining(FrameQueue *f)
756 {
757  return f->size - f->rindex_shown;
758 }
759 
760 /* return last shown position */
761 static int64_t frame_queue_last_pos(FrameQueue *f)
762 {
763  Frame *fp = &f->queue[f->rindex];
764  if (f->rindex_shown && fp->serial == f->pktq->serial)
765  return fp->pos;
766  else
767  return -1;
768 }
769 
770 static void decoder_abort(Decoder *d, FrameQueue *fq)
771 {
772  packet_queue_abort(d->queue);
773  frame_queue_signal(fq);
774  SDL_WaitThread(d->decoder_tid, NULL);
775  d->decoder_tid = NULL;
776  packet_queue_flush(d->queue);
777 }
778 
779 static inline void fill_rectangle(SDL_Surface *screen,
780  int x, int y, int w, int h, int color, int update)
781 {
782  SDL_Rect rect;
783  rect.x = x;
784  rect.y = y;
785  rect.w = w;
786  rect.h = h;
787  SDL_FillRect(screen, &rect, color);
788  if (update && w > 0 && h > 0)
789  SDL_UpdateRect(screen, x, y, w, h);
790 }
791 
792 /* draw only the border of a rectangle */
793 static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
794 {
795  int w1, w2, h1, h2;
796 
797  /* fill the background */
798  w1 = x;
799  if (w1 < 0)
800  w1 = 0;
801  w2 = width - (x + w);
802  if (w2 < 0)
803  w2 = 0;
804  h1 = y;
805  if (h1 < 0)
806  h1 = 0;
807  h2 = height - (y + h);
808  if (h2 < 0)
809  h2 = 0;
810  fill_rectangle(screen,
811  xleft, ytop,
812  w1, height,
813  color, update);
814  fill_rectangle(screen,
815  xleft + width - w2, ytop,
816  w2, height,
817  color, update);
818  fill_rectangle(screen,
819  xleft + w1, ytop,
820  width - w1 - w2, h1,
821  color, update);
822  fill_rectangle(screen,
823  xleft + w1, ytop + height - h2,
824  width - w1 - w2, h2,
825  color, update);
826 }
827 
828 #define ALPHA_BLEND(a, oldp, newp, s)\
829 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
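/* Editor's note (illustrative, not part of the original source): ALPHA_BLEND mixes an
 * old and a new sample with alpha a in [0,255] at scale shift s, e.g.
 *     ALPHA_BLEND(0,   oldp, newp, 0) == oldp   (fully transparent)
 *     ALPHA_BLEND(255, oldp, newp, 0) == newp   (fully opaque)
 * The shift s lets callers below accumulate 2 or 4 chroma samples before dividing. */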
830 
831 #define RGBA_IN(r, g, b, a, s)\
832 {\
833  unsigned int v = ((const uint32_t *)(s))[0];\
834  a = (v >> 24) & 0xff;\
835  r = (v >> 16) & 0xff;\
836  g = (v >> 8) & 0xff;\
837  b = v & 0xff;\
838 }
839 
840 #define YUVA_IN(y, u, v, a, s, pal)\
841 {\
842  unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
843  a = (val >> 24) & 0xff;\
844  y = (val >> 16) & 0xff;\
845  u = (val >> 8) & 0xff;\
846  v = val & 0xff;\
847 }
848 
849 #define YUVA_OUT(d, y, u, v, a)\
850 {\
851  ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
852 }
853 
854 
855 #define BPP 1
856 
857 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
858 {
859  int wrap, wrap3, width2, skip2;
860  int y, u, v, a, u1, v1, a1, w, h;
861  uint8_t *lum, *cb, *cr;
862  const uint8_t *p;
863  const uint32_t *pal;
864  int dstx, dsty, dstw, dsth;
865 
866  dstw = av_clip(rect->w, 0, imgw);
867  dsth = av_clip(rect->h, 0, imgh);
868  dstx = av_clip(rect->x, 0, imgw - dstw);
869  dsty = av_clip(rect->y, 0, imgh - dsth);
870  lum = dst->data[0] + dsty * dst->linesize[0];
871  cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
872  cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
873 
874  width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
875  skip2 = dstx >> 1;
876  wrap = dst->linesize[0];
877  wrap3 = rect->pict.linesize[0];
878  p = rect->pict.data[0];
879  pal = (const uint32_t *)rect->pict.data[1]; /* Now in YCrCb! */
880 
881  if (dsty & 1) {
882  lum += dstx;
883  cb += skip2;
884  cr += skip2;
885 
886  if (dstx & 1) {
887  YUVA_IN(y, u, v, a, p, pal);
888  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
889  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
890  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
891  cb++;
892  cr++;
893  lum++;
894  p += BPP;
895  }
896  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
897  YUVA_IN(y, u, v, a, p, pal);
898  u1 = u;
899  v1 = v;
900  a1 = a;
901  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
902 
903  YUVA_IN(y, u, v, a, p + BPP, pal);
904  u1 += u;
905  v1 += v;
906  a1 += a;
907  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
908  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
909  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
910  cb++;
911  cr++;
912  p += 2 * BPP;
913  lum += 2;
914  }
915  if (w) {
916  YUVA_IN(y, u, v, a, p, pal);
917  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
918  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
919  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
920  p++;
921  lum++;
922  }
923  p += wrap3 - dstw * BPP;
924  lum += wrap - dstw - dstx;
925  cb += dst->linesize[1] - width2 - skip2;
926  cr += dst->linesize[2] - width2 - skip2;
927  }
928  for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
929  lum += dstx;
930  cb += skip2;
931  cr += skip2;
932 
933  if (dstx & 1) {
934  YUVA_IN(y, u, v, a, p, pal);
935  u1 = u;
936  v1 = v;
937  a1 = a;
938  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
939  p += wrap3;
940  lum += wrap;
941  YUVA_IN(y, u, v, a, p, pal);
942  u1 += u;
943  v1 += v;
944  a1 += a;
945  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
946  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
947  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
948  cb++;
949  cr++;
950  p += -wrap3 + BPP;
951  lum += -wrap + 1;
952  }
953  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
954  YUVA_IN(y, u, v, a, p, pal);
955  u1 = u;
956  v1 = v;
957  a1 = a;
958  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
959 
960  YUVA_IN(y, u, v, a, p + BPP, pal);
961  u1 += u;
962  v1 += v;
963  a1 += a;
964  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
965  p += wrap3;
966  lum += wrap;
967 
968  YUVA_IN(y, u, v, a, p, pal);
969  u1 += u;
970  v1 += v;
971  a1 += a;
972  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
973 
974  YUVA_IN(y, u, v, a, p + BPP, pal);
975  u1 += u;
976  v1 += v;
977  a1 += a;
978  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
979 
980  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
981  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
982 
983  cb++;
984  cr++;
985  p += -wrap3 + 2 * BPP;
986  lum += -wrap + 2;
987  }
988  if (w) {
989  YUVA_IN(y, u, v, a, p, pal);
990  u1 = u;
991  v1 = v;
992  a1 = a;
993  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
994  p += wrap3;
995  lum += wrap;
996  YUVA_IN(y, u, v, a, p, pal);
997  u1 += u;
998  v1 += v;
999  a1 += a;
1000  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1001  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
1002  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
1003  cb++;
1004  cr++;
1005  p += -wrap3 + BPP;
1006  lum += -wrap + 1;
1007  }
1008  p += wrap3 + (wrap3 - dstw * BPP);
1009  lum += wrap + (wrap - dstw - dstx);
1010  cb += dst->linesize[1] - width2 - skip2;
1011  cr += dst->linesize[2] - width2 - skip2;
1012  }
1013  /* handle odd height */
1014  if (h) {
1015  lum += dstx;
1016  cb += skip2;
1017  cr += skip2;
1018 
1019  if (dstx & 1) {
1020  YUVA_IN(y, u, v, a, p, pal);
1021  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1022  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1023  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1024  cb++;
1025  cr++;
1026  lum++;
1027  p += BPP;
1028  }
1029  for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
1030  YUVA_IN(y, u, v, a, p, pal);
1031  u1 = u;
1032  v1 = v;
1033  a1 = a;
1034  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1035 
1036  YUVA_IN(y, u, v, a, p + BPP, pal);
1037  u1 += u;
1038  v1 += v;
1039  a1 += a;
1040  lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
1041  cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
1042  cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
1043  cb++;
1044  cr++;
1045  p += 2 * BPP;
1046  lum += 2;
1047  }
1048  if (w) {
1049  YUVA_IN(y, u, v, a, p, pal);
1050  lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
1051  cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
1052  cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
1053  }
1054  }
1055 }
1056 
1057 static void free_picture(Frame *vp)
1058 {
1059  if (vp->bmp) {
1060  SDL_FreeYUVOverlay(vp->bmp);
1061  vp->bmp = NULL;
1062  }
1063 }
1064 
1065 static void calculate_display_rect(SDL_Rect *rect,
1066  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
1067  int pic_width, int pic_height, AVRational pic_sar)
1068 {
1069  float aspect_ratio;
1070  int width, height, x, y;
1071 
1072  if (pic_sar.num == 0)
1073  aspect_ratio = 0;
1074  else
1075  aspect_ratio = av_q2d(pic_sar);
1076 
1077  if (aspect_ratio <= 0.0)
1078  aspect_ratio = 1.0;
1079  aspect_ratio *= (float)pic_width / (float)pic_height;
1080 
1081  /* XXX: we suppose the screen has a 1.0 pixel ratio */
1082  height = scr_height;
1083  width = ((int)rint(height * aspect_ratio)) & ~1;
1084  if (width > scr_width) {
1085  width = scr_width;
1086  height = ((int)rint(width / aspect_ratio)) & ~1;
1087  }
1088  x = (scr_width - width) / 2;
1089  y = (scr_height - height) / 2;
1090  rect->x = scr_xleft + x;
1091  rect->y = scr_ytop + y;
1092  rect->w = FFMAX(width, 1);
1093  rect->h = FFMAX(height, 1);
1094 }
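/* Worked example (editor's illustration): a 1920x1080 picture with SAR 1:1 on a
 * 640x480 screen gives aspect_ratio = 1920/1080 ~= 1.778; height = 480 would need
 * width ~= 853 > 640, so width = 640 and height = rint(640/1.778) & ~1 = 360,
 * centered at x = 0, y = (480 - 360) / 2 = 60. */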
1095 
1096 static void video_image_display(VideoState *is)
1097 {
1098  Frame *vp;
1099  Frame *sp;
1100  AVPicture pict;
1101  SDL_Rect rect;
1102  int i;
1103 
1104  vp = frame_queue_peek(&is->pictq);
1105  if (vp->bmp) {
1106  if (is->subtitle_st) {
1107  if (frame_queue_nb_remaining(&is->subpq) > 0) {
1108  sp = frame_queue_peek(&is->subpq);
1109 
1110  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
1111  SDL_LockYUVOverlay (vp->bmp);
1112 
1113  pict.data[0] = vp->bmp->pixels[0];
1114  pict.data[1] = vp->bmp->pixels[2];
1115  pict.data[2] = vp->bmp->pixels[1];
1116 
1117  pict.linesize[0] = vp->bmp->pitches[0];
1118  pict.linesize[1] = vp->bmp->pitches[2];
1119  pict.linesize[2] = vp->bmp->pitches[1];
1120 
1121  for (i = 0; i < sp->sub.num_rects; i++)
1122  blend_subrect(&pict, sp->sub.rects[i],
1123  vp->bmp->w, vp->bmp->h);
1124 
1125  SDL_UnlockYUVOverlay (vp->bmp);
1126  }
1127  }
1128  }
1129 
1130  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1131 
1132  SDL_DisplayYUVOverlay(vp->bmp, &rect);
1133 
1134  if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
1135  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1136  fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
1137  is->last_display_rect = rect;
1138  }
1139  }
1140 }
1141 
1142 static inline int compute_mod(int a, int b)
1143 {
1144  return a < 0 ? a%b + b : a%b;
1145 }
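/* Editor's note: unlike C's %, this always returns a value in [0, b), e.g.
 * compute_mod(-3, 10) == 7 while -3 % 10 == -3. */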
1146 
1147 static void video_audio_display(VideoState *s)
1148 {
1149  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1150  int ch, channels, h, h2, bgcolor, fgcolor;
1151  int64_t time_diff;
1152  int rdft_bits, nb_freq;
1153 
1154  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1155  ;
1156  nb_freq = 1 << (rdft_bits - 1);
1157 
1158  /* compute display index : center on currently output samples */
1159  channels = s->audio_tgt.channels;
1160  nb_display_channels = channels;
1161  if (!s->paused) {
1162  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1163  n = 2 * channels;
1164  delay = s->audio_write_buf_size;
1165  delay /= n;
1166 
1167  /* to be more precise, we take into account the time spent since
1168  the last buffer computation */
1169  if (audio_callback_time) {
1170  time_diff = av_gettime_relative() - audio_callback_time;
1171  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1172  }
1173 
1174  delay += 2 * data_used;
1175  if (delay < data_used)
1176  delay = data_used;
1177 
1178  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1179  if (s->show_mode == SHOW_MODE_WAVES) {
1180  h = INT_MIN;
1181  for (i = 0; i < 1000; i += channels) {
1182  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1183  int a = s->sample_array[idx];
1184  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1185  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1186  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1187  int score = a - d;
1188  if (h < score && (b ^ c) < 0) {
1189  h = score;
1190  i_start = idx;
1191  }
1192  }
1193  }
1194 
1195  s->last_i_start = i_start;
1196  } else {
1197  i_start = s->last_i_start;
1198  }
1199 
1200  bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
1201  if (s->show_mode == SHOW_MODE_WAVES) {
1202  fill_rectangle(screen,
1203  s->xleft, s->ytop, s->width, s->height,
1204  bgcolor, 0);
1205 
1206  fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
1207 
1208  /* total height for one channel */
1209  h = s->height / nb_display_channels;
1210  /* graph height / 2 */
1211  h2 = (h * 9) / 20;
1212  for (ch = 0; ch < nb_display_channels; ch++) {
1213  i = i_start + ch;
1214  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1215  for (x = 0; x < s->width; x++) {
1216  y = (s->sample_array[i] * h2) >> 15;
1217  if (y < 0) {
1218  y = -y;
1219  ys = y1 - y;
1220  } else {
1221  ys = y1;
1222  }
1223  fill_rectangle(screen,
1224  s->xleft + x, ys, 1, y,
1225  fgcolor, 0);
1226  i += channels;
1227  if (i >= SAMPLE_ARRAY_SIZE)
1228  i -= SAMPLE_ARRAY_SIZE;
1229  }
1230  }
1231 
1232  fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
1233 
1234  for (ch = 1; ch < nb_display_channels; ch++) {
1235  y = s->ytop + ch * h;
1236  fill_rectangle(screen,
1237  s->xleft, y, s->width, 1,
1238  fgcolor, 0);
1239  }
1240  SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
1241  } else {
1242  nb_display_channels= FFMIN(nb_display_channels, 2);
1243  if (rdft_bits != s->rdft_bits) {
1244  av_rdft_end(s->rdft);
1245  av_free(s->rdft_data);
1246  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1247  s->rdft_bits = rdft_bits;
1248  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1249  }
1250  if (!s->rdft || !s->rdft_data){
1251  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1252  s->show_mode = SHOW_MODE_WAVES;
1253  } else {
1254  FFTSample *data[2];
1255  for (ch = 0; ch < nb_display_channels; ch++) {
1256  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1257  i = i_start + ch;
1258  for (x = 0; x < 2 * nb_freq; x++) {
1259  double w = (x-nb_freq) * (1.0 / nb_freq);
1260  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1261  i += channels;
1262  if (i >= SAMPLE_ARRAY_SIZE)
1263  i -= SAMPLE_ARRAY_SIZE;
1264  }
1265  av_rdft_calc(s->rdft, data[ch]);
1266  }
1267  /* Least efficient way to do this, we should of course
1268  * directly access it but it is more than fast enough. */
1269  for (y = 0; y < s->height; y++) {
1270  double w = 1 / sqrt(nb_freq);
1271  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1272  int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
1273  + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
1274  a = FFMIN(a, 255);
1275  b = FFMIN(b, 255);
1276  fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
1277 
1278  fill_rectangle(screen,
1279  s->xpos, s->height-y, 1, 1,
1280  fgcolor, 0);
1281  }
1282  }
1283  SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
1284  if (!s->paused)
1285  s->xpos++;
1286  if (s->xpos >= s->width)
1287  s->xpos= s->xleft;
1288  }
1289 }
1290 
1291 static void stream_close(VideoState *is)
1292 {
1293  /* XXX: use a special url_shutdown call to abort parse cleanly */
1294  is->abort_request = 1;
1295  SDL_WaitThread(is->read_tid, NULL);
1296  packet_queue_destroy(&is->videoq);
1297  packet_queue_destroy(&is->audioq);
1298  packet_queue_destroy(&is->subtitleq);
1299 
1300  /* free all pictures */
1301  frame_queue_destory(&is->pictq);
1302  frame_queue_destory(&is->sampq);
1303  frame_queue_destory(&is->subpq);
1304  SDL_DestroyCond(is->continue_read_thread);
1305 #if !CONFIG_AVFILTER
1306  sws_freeContext(is->img_convert_ctx);
1307 #endif
1308  av_free(is);
1309 }
1310 
1311 static void do_exit(VideoState *is)
1312 {
1313  if (is) {
1314  stream_close(is);
1315  }
1316  av_lockmgr_register(NULL);
1317  uninit_opts();
1318 #if CONFIG_AVFILTER
1319  av_freep(&vfilters_list);
1320 #endif
1321  avformat_network_deinit();
1322  if (show_status)
1323  printf("\n");
1324  SDL_Quit();
1325  av_log(NULL, AV_LOG_QUIET, "%s", "");
1326  exit(0);
1327 }
1328 
1329 static void sigterm_handler(int sig)
1330 {
1331  exit(123);
1332 }
1333 
1334 static void set_default_window_size(int width, int height, AVRational sar)
1335 {
1336  SDL_Rect rect;
1337  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1338  default_width = rect.w;
1339  default_height = rect.h;
1340 }
1341 
1342 static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
1343 {
1344  int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
1345  int w,h;
1346 
1347  if (is_full_screen) flags |= SDL_FULLSCREEN;
1348  else flags |= SDL_RESIZABLE;
1349 
1350  if (vp && vp->width)
1351  set_default_window_size(vp->width, vp->height, vp->sar);
1352 
1354  w = fs_screen_width;
1355  h = fs_screen_height;
1356  } else if (!is_full_screen && screen_width) {
1357  w = screen_width;
1358  h = screen_height;
1359  } else {
1360  w = default_width;
1361  h = default_height;
1362  }
1363  w = FFMIN(16383, w);
1364  if (screen && is->width == screen->w && screen->w == w
1365  && is->height== screen->h && screen->h == h && !force_set_video_mode)
1366  return 0;
1367  screen = SDL_SetVideoMode(w, h, 0, flags);
1368  if (!screen) {
1369  av_log(NULL, AV_LOG_FATAL, "SDL: could not set video mode - exiting\n");
1370  do_exit(is);
1371  }
1372  if (!window_title)
1373  window_title = input_filename;
1374  SDL_WM_SetCaption(window_title, window_title);
1375 
1376  is->width = screen->w;
1377  is->height = screen->h;
1378 
1379  return 0;
1380 }
1381 
1382 /* display the current picture, if any */
1383 static void video_display(VideoState *is)
1384 {
1385  if (!screen)
1386  video_open(is, 0, NULL);
1387  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1388  video_audio_display(is);
1389  else if (is->video_st)
1390  video_image_display(is);
1391 }
1392 
1393 static double get_clock(Clock *c)
1394 {
1395  if (*c->queue_serial != c->serial)
1396  return NAN;
1397  if (c->paused) {
1398  return c->pts;
1399  } else {
1400  double time = av_gettime_relative() / 1000000.0;
1401  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1402  }
1403 }
1404 
1405 static void set_clock_at(Clock *c, double pts, int serial, double time)
1406 {
1407  c->pts = pts;
1408  c->last_updated = time;
1409  c->pts_drift = c->pts - time;
1410  c->serial = serial;
1411 }
1412 
1413 static void set_clock(Clock *c, double pts, int serial)
1414 {
1415  double time = av_gettime_relative() / 1000000.0;
1416  set_clock_at(c, pts, serial, time);
1417 }
1418 
1419 static void set_clock_speed(Clock *c, double speed)
1420 {
1421  set_clock(c, get_clock(c), c->serial);
1422  c->speed = speed;
1423 }
1424 
1425 static void init_clock(Clock *c, int *queue_serial)
1426 {
1427  c->speed = 1.0;
1428  c->paused = 0;
1429  c->queue_serial = queue_serial;
1430  set_clock(c, NAN, -1);
1431 }
1432 
1433 static void sync_clock_to_slave(Clock *c, Clock *slave)
1434 {
1435  double clock = get_clock(c);
1436  double slave_clock = get_clock(slave);
1437  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1438  set_clock(c, slave_clock, slave->serial);
1439 }
1440 
1441 static int get_master_sync_type(VideoState *is) {
1442  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1443  if (is->video_st)
1444  return AV_SYNC_VIDEO_MASTER;
1445  else
1446  return AV_SYNC_AUDIO_MASTER;
1447  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1448  if (is->audio_st)
1449  return AV_SYNC_AUDIO_MASTER;
1450  else
1451  return AV_SYNC_EXTERNAL_CLOCK;
1452  } else {
1453  return AV_SYNC_EXTERNAL_CLOCK;
1454  }
1455 }
1456 
1457 /* get the current master clock value */
1458 static double get_master_clock(VideoState *is)
1459 {
1460  double val;
1461 
1462  switch (get_master_sync_type(is)) {
1463  case AV_SYNC_VIDEO_MASTER:
1464  val = get_clock(&is->vidclk);
1465  break;
1466  case AV_SYNC_AUDIO_MASTER:
1467  val = get_clock(&is->audclk);
1468  break;
1469  default:
1470  val = get_clock(&is->extclk);
1471  break;
1472  }
1473  return val;
1474 }
1475 
1476 static void check_external_clock_speed(VideoState *is) {
1477  if (is->video_stream >= 0 && is->videoq.nb_packets <= MIN_FRAMES / 2 ||
1478  is->audio_stream >= 0 && is->audioq.nb_packets <= MIN_FRAMES / 2) {
1479  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1480  } else if ((is->video_stream < 0 || is->videoq.nb_packets > MIN_FRAMES * 2) &&
1481  (is->audio_stream < 0 || is->audioq.nb_packets > MIN_FRAMES * 2)) {
1482  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1483  } else {
1484  double speed = is->extclk.speed;
1485  if (speed != 1.0)
1486  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1487  }
1488 }
1489 
1490 /* seek in the stream */
1491 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1492 {
1493  if (!is->seek_req) {
1494  is->seek_pos = pos;
1495  is->seek_rel = rel;
1496  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1497  if (seek_by_bytes)
1498  is->seek_flags |= AVSEEK_FLAG_BYTE;
1499  is->seek_req = 1;
1500  SDL_CondSignal(is->continue_read_thread);
1501  }
1502 }
1503 
1504 /* pause or resume the video */
1505 static void stream_toggle_pause(VideoState *is)
1506 {
1507  if (is->paused) {
1508  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1509  if (is->read_pause_return != AVERROR(ENOSYS)) {
1510  is->vidclk.paused = 0;
1511  }
1512  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1513  }
1514  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1515  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1516 }
1517 
1518 static void toggle_pause(VideoState *is)
1519 {
1520  stream_toggle_pause(is);
1521  is->step = 0;
1522 }
1523 
1524 static void step_to_next_frame(VideoState *is)
1525 {
1526  /* if the stream is paused unpause it, then step */
1527  if (is->paused)
1528  stream_toggle_pause(is);
1529  is->step = 1;
1530 }
1531 
1532 static double compute_target_delay(double delay, VideoState *is)
1533 {
1534  double sync_threshold, diff;
1535 
1536  /* update delay to follow master synchronisation source */
1537  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1538  /* if video is slave, we try to correct big delays by
1539  duplicating or deleting a frame */
1540  diff = get_clock(&is->vidclk) - get_master_clock(is);
1541 
1542  /* skip or repeat frame. We take into account the
1543  delay to compute the threshold. I still don't know
1544  if it is the best guess */
1545  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1546  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1547  if (diff <= -sync_threshold)
1548  delay = FFMAX(0, delay + diff);
1549  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1550  delay = delay + diff;
1551  else if (diff >= sync_threshold)
1552  delay = 2 * delay;
1553  }
1554  }
1555 
1556  av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
1557  delay, -diff);
1558 
1559  return delay;
1560 }
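/* Worked example (editor's illustration): with a nominal delay of 0.040 s and the
 * video clock 0.100 s behind the master (diff = -0.100), sync_threshold is clamped
 * to 0.040, diff <= -sync_threshold holds, and the returned delay is
 * FFMAX(0, 0.040 - 0.100) = 0, so the next frame is shown immediately to catch up. */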
1561 
1562 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1563  if (vp->serial == nextvp->serial) {
1564  double duration = nextvp->pts - vp->pts;
1565  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1566  return vp->duration;
1567  else
1568  return duration;
1569  } else {
1570  return 0.0;
1571  }
1572 }
1573 
1574 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1575  /* update current video pts */
1576  set_clock(&is->vidclk, pts, serial);
1577  sync_clock_to_slave(&is->extclk, &is->vidclk);
1578 }
1579 
1580 /* called to display each frame */
1581 static void video_refresh(void *opaque, double *remaining_time)
1582 {
1583  VideoState *is = opaque;
1584  double time;
1585 
1586  Frame *sp, *sp2;
1587 
1588  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1589  check_external_clock_speed(is);
1590 
1591  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1592  time = av_gettime_relative() / 1000000.0;
1593  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1594  video_display(is);
1595  is->last_vis_time = time;
1596  }
1597  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1598  }
1599 
1600  if (is->video_st) {
1601  int redisplay = 0;
1602  if (is->force_refresh)
1603  redisplay = frame_queue_prev(&is->pictq);
1604 retry:
1605  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1606  // nothing to do, no picture to display in the queue
1607  } else {
1608  double last_duration, duration, delay;
1609  Frame *vp, *lastvp;
1610 
1611  /* dequeue the picture */
1612  lastvp = frame_queue_peek_last(&is->pictq);
1613  vp = frame_queue_peek(&is->pictq);
1614 
1615  if (vp->serial != is->videoq.serial) {
1616  frame_queue_next(&is->pictq);
1617  redisplay = 0;
1618  goto retry;
1619  }
1620 
1621  if (lastvp->serial != vp->serial && !redisplay)
1622  is->frame_timer = av_gettime_relative() / 1000000.0;
1623 
1624  if (is->paused)
1625  goto display;
1626 
1627  /* compute nominal last_duration */
1628  last_duration = vp_duration(is, lastvp, vp);
1629  if (redisplay)
1630  delay = 0.0;
1631  else
1632  delay = compute_target_delay(last_duration, is);
1633 
1634  time= av_gettime_relative()/1000000.0;
1635  if (time < is->frame_timer + delay && !redisplay) {
1636  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1637  return;
1638  }
1639 
1640  is->frame_timer += delay;
1641  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1642  is->frame_timer = time;
1643 
1644  SDL_LockMutex(is->pictq.mutex);
1645  if (!redisplay && !isnan(vp->pts))
1646  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1647  SDL_UnlockMutex(is->pictq.mutex);
1648 
1649  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1650  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1651  duration = vp_duration(is, vp, nextvp);
1652  if(!is->step && (redisplay || framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1653  if (!redisplay)
1654  is->frame_drops_late++;
1655  frame_queue_next(&is->pictq);
1656  redisplay = 0;
1657  goto retry;
1658  }
1659  }
1660 
1661  if (is->subtitle_st) {
1662  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1663  sp = frame_queue_peek(&is->subpq);
1664 
1665  if (frame_queue_nb_remaining(&is->subpq) > 1)
1666  sp2 = frame_queue_peek_next(&is->subpq);
1667  else
1668  sp2 = NULL;
1669 
1670  if (sp->serial != is->subtitleq.serial
1671  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1672  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1673  {
1674  frame_queue_next(&is->subpq);
1675  } else {
1676  break;
1677  }
1678  }
1679  }
1680 
1681 display:
1682  /* display picture */
1683  if (!display_disable && is->show_mode == SHOW_MODE_VIDEO)
1684  video_display(is);
1685 
1686  frame_queue_next(&is->pictq);
1687 
1688  if (is->step && !is->paused)
1689  stream_toggle_pause(is);
1690  }
1691  }
1692  is->force_refresh = 0;
1693  if (show_status) {
1694  static int64_t last_time;
1695  int64_t cur_time;
1696  int aqsize, vqsize, sqsize;
1697  double av_diff;
1698 
1699  cur_time = av_gettime_relative();
1700  if (!last_time || (cur_time - last_time) >= 30000) {
1701  aqsize = 0;
1702  vqsize = 0;
1703  sqsize = 0;
1704  if (is->audio_st)
1705  aqsize = is->audioq.size;
1706  if (is->video_st)
1707  vqsize = is->videoq.size;
1708  if (is->subtitle_st)
1709  sqsize = is->subtitleq.size;
1710  av_diff = 0;
1711  if (is->audio_st && is->video_st)
1712  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1713  else if (is->video_st)
1714  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1715  else if (is->audio_st)
1716  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1718  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1719  get_master_clock(is),
1720  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1721  av_diff,
1723  aqsize / 1024,
1724  vqsize / 1024,
1725  sqsize,
1728  fflush(stdout);
1729  last_time = cur_time;
1730  }
1731  }
1732 }
1733 
1734 /* allocate a picture (needs to be done in the main thread to avoid
1735  potential locking problems) */
1736 static void alloc_picture(VideoState *is)
1737 {
1738  Frame *vp;
1739  int64_t bufferdiff;
1740 
1741  vp = &is->pictq.queue[is->pictq.windex];
1742 
1743  free_picture(vp);
1744 
1745  video_open(is, 0, vp);
1746 
1747  vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1748  SDL_YV12_OVERLAY,
1749  screen);
1750  bufferdiff = vp->bmp ? FFMAX(vp->bmp->pixels[0], vp->bmp->pixels[1]) - FFMIN(vp->bmp->pixels[0], vp->bmp->pixels[1]) : 0;
1751  if (!vp->bmp || vp->bmp->pitches[0] < vp->width || bufferdiff < (int64_t)vp->height * vp->bmp->pitches[0]) {
1752  /* SDL allocates a buffer smaller than requested if the video
1753  * overlay hardware is unable to support the requested size. */
1755  "Error: the video system does not support an image\n"
1756  "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1757  "to reduce the image size.\n", vp->width, vp->height );
1758  do_exit(is);
1759  }
1760 
1761  SDL_LockMutex(is->pictq.mutex);
1762  vp->allocated = 1;
1763  SDL_CondSignal(is->pictq.cond);
1764  SDL_UnlockMutex(is->pictq.mutex);
1765 }
1766 
1767 static void duplicate_right_border_pixels(SDL_Overlay *bmp) {
1768  int i, width, height;
1769  Uint8 *p, *maxp;
1770  for (i = 0; i < 3; i++) {
1771  width = bmp->w;
1772  height = bmp->h;
1773  if (i > 0) {
1774  width >>= 1;
1775  height >>= 1;
1776  }
1777  if (bmp->pitches[i] > width) {
1778  maxp = bmp->pixels[i] + bmp->pitches[i] * height - 1;
1779  for (p = bmp->pixels[i] + width - 1; p < maxp; p += bmp->pitches[i])
1780  *(p+1) = *p;
1781  }
1782  }
1783 }
1784 
1785 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1786 {
1787  Frame *vp;
1788 
1789 #if defined(DEBUG_SYNC) && 0
1790  printf("frame_type=%c pts=%0.3f\n",
1791  av_get_picture_type_char(src_frame->pict_type), pts);
1792 #endif
1793 
1794  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1795  return -1;
1796 
1797  vp->sar = src_frame->sample_aspect_ratio;
1798 
1799  /* alloc or resize hardware picture buffer */
1800  if (!vp->bmp || vp->reallocate || !vp->allocated ||
1801  vp->width != src_frame->width ||
1802  vp->height != src_frame->height) {
1803  SDL_Event event;
1804 
1805  vp->allocated = 0;
1806  vp->reallocate = 0;
1807  vp->width = src_frame->width;
1808  vp->height = src_frame->height;
1809 
1810  /* the allocation must be done in the main thread to avoid
1811  locking problems. */
1812  event.type = FF_ALLOC_EVENT;
1813  event.user.data1 = is;
1814  SDL_PushEvent(&event);
1815 
1816  /* wait until the picture is allocated */
1817  SDL_LockMutex(is->pictq.mutex);
1818  while (!vp->allocated && !is->videoq.abort_request) {
1819  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1820  }
1821  /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
1822  if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
1823  while (!vp->allocated && !is->abort_request) {
1824  SDL_CondWait(is->pictq.cond, is->pictq.mutex);
1825  }
1826  }
1827  SDL_UnlockMutex(is->pictq.mutex);
1828 
1829  if (is->videoq.abort_request)
1830  return -1;
1831  }
1832 
1833  /* if the frame is not skipped, then display it */
1834  if (vp->bmp) {
1835  AVPicture pict = { { 0 } };
1836 
1837  /* get a pointer on the bitmap */
1838  SDL_LockYUVOverlay (vp->bmp);
1839 
1840  pict.data[0] = vp->bmp->pixels[0];
1841  pict.data[1] = vp->bmp->pixels[2];
1842  pict.data[2] = vp->bmp->pixels[1];
1843 
1844  pict.linesize[0] = vp->bmp->pitches[0];
1845  pict.linesize[1] = vp->bmp->pitches[2];
1846  pict.linesize[2] = vp->bmp->pitches[1];
1847 
1848 #if CONFIG_AVFILTER
1849  // FIXME use direct rendering
1850  av_picture_copy(&pict, (AVPicture *)src_frame,
1851  src_frame->format, vp->width, vp->height);
1852 #else
1853  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1854  is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1855  vp->width, vp->height, src_frame->format, vp->width, vp->height,
1856  AV_PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1857  if (!is->img_convert_ctx) {
1858  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
1859  exit(1);
1860  }
1861  sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1862  0, vp->height, pict.data, pict.linesize);
1863 #endif
1864  /* workaround SDL PITCH_WORKAROUND */
1865  duplicate_right_border_pixels(vp->bmp);
1866  /* update the bitmap content */
1867  SDL_UnlockYUVOverlay(vp->bmp);
1868 
1869  vp->pts = pts;
1870  vp->duration = duration;
1871  vp->pos = pos;
1872  vp->serial = serial;
1873 
1874  /* now we can update the picture count */
1875  frame_queue_push(&is->pictq);
1876  }
1877  return 0;
1878 }
1879 
1880 static int get_video_frame(VideoState *is, AVFrame *frame)
1881 {
1882  int got_picture;
1883 
1884  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1885  return -1;
1886 
1887  if (got_picture) {
1888  double dpts = NAN;
1889 
1890  if (frame->pts != AV_NOPTS_VALUE)
1891  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1892 
1893  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1894 
1895  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1896  if (frame->pts != AV_NOPTS_VALUE) {
1897  double diff = dpts - get_master_clock(is);
1898  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1899  diff - is->frame_last_filter_delay < 0 &&
1900  is->viddec.pkt_serial == is->vidclk.serial &&
1901  is->videoq.nb_packets) {
1902  is->frame_drops_early++;
1903  av_frame_unref(frame);
1904  got_picture = 0;
1905  }
1906  }
1907  }
1908  }
1909 
1910  return got_picture;
1911 }
1912 
1913 #if CONFIG_AVFILTER
1914 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1915  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1916 {
1917  int ret, i;
1918  int nb_filters = graph->nb_filters;
1919  AVFilterInOut *outputs = NULL, *inputs = NULL;
1920 
1921  if (filtergraph) {
1922  outputs = avfilter_inout_alloc();
1923  inputs = avfilter_inout_alloc();
1924  if (!outputs || !inputs) {
1925  ret = AVERROR(ENOMEM);
1926  goto fail;
1927  }
1928 
1929  outputs->name = av_strdup("in");
1930  outputs->filter_ctx = source_ctx;
1931  outputs->pad_idx = 0;
1932  outputs->next = NULL;
1933 
1934  inputs->name = av_strdup("out");
1935  inputs->filter_ctx = sink_ctx;
1936  inputs->pad_idx = 0;
1937  inputs->next = NULL;
1938 
1939  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1940  goto fail;
1941  } else {
1942  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1943  goto fail;
1944  }
1945 
1946  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1947  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1948  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1949 
1950  ret = avfilter_graph_config(graph, NULL);
1951 fail:
1952  avfilter_inout_free(&outputs);
1953  avfilter_inout_free(&inputs);
1954  return ret;
1955 }
1956 
1957 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1958 {
1959  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE };
1960  char sws_flags_str[128];
1961  char buffersrc_args[256];
1962  int ret;
1963  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1964  AVCodecContext *codec = is->video_st->codec;
1965  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1966 
1967  av_opt_get_int(sws_opts, "sws_flags", 0, &sws_flags);
1968  snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%"PRId64, sws_flags);
1969  graph->scale_sws_opts = av_strdup(sws_flags_str);
1970 
1971  snprintf(buffersrc_args, sizeof(buffersrc_args),
1972  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1973  frame->width, frame->height, frame->format,
1974  is->video_st->time_base.num, is->video_st->time_base.den,
1975  codec->sample_aspect_ratio.num, FFMAX(codec->sample_aspect_ratio.den, 1));
1976  if (fr.num && fr.den)
1977  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1978 
1979  if ((ret = avfilter_graph_create_filter(&filt_src,
1980  avfilter_get_by_name("buffer"),
1981  "ffplay_buffer", buffersrc_args, NULL,
1982  graph)) < 0)
1983  goto fail;
1984 
1985  ret = avfilter_graph_create_filter(&filt_out,
1986  avfilter_get_by_name("buffersink"),
1987  "ffplay_buffersink", NULL, NULL, graph);
1988  if (ret < 0)
1989  goto fail;
1990 
1991  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1992  goto fail;
1993 
1994  last_filter = filt_out;
1995 
1996 /* Note: this macro adds a filter before the most recently added filter, so the
1997  * filters are processed in reverse order */
1998 #define INSERT_FILT(name, arg) do { \
1999  AVFilterContext *filt_ctx; \
2000  \
2001  ret = avfilter_graph_create_filter(&filt_ctx, \
2002  avfilter_get_by_name(name), \
2003  "ffplay_" name, arg, NULL, graph); \
2004  if (ret < 0) \
2005  goto fail; \
2006  \
2007  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
2008  if (ret < 0) \
2009  goto fail; \
2010  \
2011  last_filter = filt_ctx; \
2012 } while (0)
2013 
2014  /* SDL YUV code is not handling odd width/height for some driver
2015  * combinations, therefore we crop the picture to an even width/height. */
2016  INSERT_FILT("crop", "floor(in_w/2)*2:floor(in_h/2)*2");
2017 
2018  if (autorotate) {
2019  AVDictionaryEntry *rotate_tag = av_dict_get(is->video_st->metadata, "rotate", NULL, 0);
2020  if (rotate_tag && *rotate_tag->value && strcmp(rotate_tag->value, "0")) {
2021  if (!strcmp(rotate_tag->value, "90")) {
2022  INSERT_FILT("transpose", "clock");
2023  } else if (!strcmp(rotate_tag->value, "180")) {
2024  INSERT_FILT("hflip", NULL);
2025  INSERT_FILT("vflip", NULL);
2026  } else if (!strcmp(rotate_tag->value, "270")) {
2027  INSERT_FILT("transpose", "cclock");
2028  } else {
2029  char rotate_buf[64];
2030  snprintf(rotate_buf, sizeof(rotate_buf), "%s*PI/180", rotate_tag->value);
2031  INSERT_FILT("rotate", rotate_buf);
2032  }
2033  }
2034  }
2035 
2036  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
2037  goto fail;
2038 
2039  is->in_video_filter = filt_src;
2040  is->out_video_filter = filt_out;
2041 
2042 fail:
2043  return ret;
2044 }
2045 
2046 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
2047 {
2049  int sample_rates[2] = { 0, -1 };
2050  int64_t channel_layouts[2] = { 0, -1 };
2051  int channels[2] = { 0, -1 };
2052  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
2053  char aresample_swr_opts[512] = "";
2054  AVDictionaryEntry *e = NULL;
2055  char asrc_args[256];
2056  int ret;
2057 
2058  avfilter_graph_free(&is->agraph);
2059  if (!(is->agraph = avfilter_graph_alloc()))
2060  return AVERROR(ENOMEM);
2061 
2062  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
2063  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
2064  if (strlen(aresample_swr_opts))
2065  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
2066  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
2067 
2068  ret = snprintf(asrc_args, sizeof(asrc_args),
2069  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
2070  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
2071  is->audio_filter_src.channels,
2072  1, is->audio_filter_src.freq);
2073  if (is->audio_filter_src.channel_layout)
2074  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
2075  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
2076 
2077  ret = avfilter_graph_create_filter(&filt_asrc,
2078  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
2079  asrc_args, NULL, is->agraph);
2080  if (ret < 0)
2081  goto end;
2082 
2083 
2084  ret = avfilter_graph_create_filter(&filt_asink,
2085  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
2086  NULL, NULL, is->agraph);
2087  if (ret < 0)
2088  goto end;
2089 
2090  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
2091  goto end;
2092  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
2093  goto end;
2094 
2095  if (force_output_format) {
2096  channel_layouts[0] = is->audio_tgt.channel_layout;
2097  channels [0] = is->audio_tgt.channels;
2098  sample_rates [0] = is->audio_tgt.freq;
2099  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
2100  goto end;
2101  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2102  goto end;
2103  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2104  goto end;
2105  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
2106  goto end;
2107  }
2108 
2109 
2110  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
2111  goto end;
2112 
2113  is->in_audio_filter = filt_asrc;
2114  is->out_audio_filter = filt_asink;
2115 
2116 end:
2117  if (ret < 0)
2118  avfilter_graph_free(&is->agraph);
2119  return ret;
2120 }
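 /* Summary: the audio graph runs abuffer (fed with the decoder's sample rate,
  * format and channel layout) -> the user "afilters" chain -> abuffersink.
  * The sink is always constrained to signed 16-bit samples; when
  * force_output_format is set it is additionally pinned to the SDL output
  * rate/layout so no further conversion is needed in the audio callback. */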
2121 #endif /* CONFIG_AVFILTER */
2122 
2123 static int audio_thread(void *arg)
2124 {
2125  VideoState *is = arg;
2126  AVFrame *frame = av_frame_alloc();
2127  Frame *af;
2128 #if CONFIG_AVFILTER
2129  int last_serial = -1;
2130  int64_t dec_channel_layout;
2131  int reconfigure;
2132 #endif
2133  int got_frame = 0;
2134  AVRational tb;
2135  int ret = 0;
2136 
2137  if (!frame)
2138  return AVERROR(ENOMEM);
2139 
2140  do {
2141  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2142  goto the_end;
2143 
2144  if (got_frame) {
2145  tb = (AVRational){1, frame->sample_rate};
2146 
2147 #if CONFIG_AVFILTER
2148  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));
2149 
2150  reconfigure =
2151  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2152  frame->format, av_frame_get_channels(frame)) ||
2153  is->audio_filter_src.channel_layout != dec_channel_layout ||
2154  is->audio_filter_src.freq != frame->sample_rate ||
2155  is->auddec.pkt_serial != last_serial;
2156 
2157  if (reconfigure) {
2158  char buf1[1024], buf2[1024];
2159  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2160  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2161  av_log(NULL, AV_LOG_DEBUG,
2162  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2163  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2164  frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2165 
2166  is->audio_filter_src.fmt = frame->format;
2167  is->audio_filter_src.channels = av_frame_get_channels(frame);
2168  is->audio_filter_src.channel_layout = dec_channel_layout;
2169  is->audio_filter_src.freq = frame->sample_rate;
2170  last_serial = is->auddec.pkt_serial;
2171 
2172  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2173  goto the_end;
2174  }
2175 
2176  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2177  goto the_end;
2178 
2179  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2180  tb = is->out_audio_filter->inputs[0]->time_base;
2181 #endif
2182  if (!(af = frame_queue_peek_writable(&is->sampq)))
2183  goto the_end;
2184 
2185  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2186  af->pos = av_frame_get_pkt_pos(frame);
2187  af->serial = is->auddec.pkt_serial;
2188  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2189 
2190  av_frame_move_ref(af->frame, frame);
2191  frame_queue_push(&is->sampq);
2192 
2193 #if CONFIG_AVFILTER
2194  if (is->audioq.serial != is->auddec.pkt_serial)
2195  break;
2196  }
2197  if (ret == AVERROR_EOF)
2198  is->auddec.finished = is->auddec.pkt_serial;
2199 #endif
2200  }
2201  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2202  the_end:
2203 #if CONFIG_AVFILTER
2204  avfilter_graph_free(&is->agraph);
2205 #endif
2206  av_frame_free(&frame);
2207  return ret;
2208 }
2209 
2210 static void decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2211 {
2212  packet_queue_start(d->queue);
2213  d->decoder_tid = SDL_CreateThread(fn, arg);
2214 }
2215 
2216 static int video_thread(void *arg)
2217 {
2218  VideoState *is = arg;
2219  AVFrame *frame = av_frame_alloc();
2220  double pts;
2221  double duration;
2222  int ret;
2223  AVRational tb = is->video_st->time_base;
2224  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2225 
2226 #if CONFIG_AVFILTER
2227  AVFilterGraph *graph = avfilter_graph_alloc();
2228  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2229  int last_w = 0;
2230  int last_h = 0;
2231  enum AVPixelFormat last_format = -2;
2232  int last_serial = -1;
2233  int last_vfilter_idx = 0;
2234 #endif
2235 
2236  if (!frame)
2237  return AVERROR(ENOMEM);
2238 
2239  for (;;) {
2240  ret = get_video_frame(is, frame);
2241  if (ret < 0)
2242  goto the_end;
2243  if (!ret)
2244  continue;
2245 
2246 #if CONFIG_AVFILTER
2247  if ( last_w != frame->width
2248  || last_h != frame->height
2249  || last_format != frame->format
2250  || last_serial != is->viddec.pkt_serial
2251  || last_vfilter_idx != is->vfilter_idx) {
2252  av_log(NULL, AV_LOG_DEBUG,
2253  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2254  last_w, last_h,
2255  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2256  frame->width, frame->height,
2257  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2258  avfilter_graph_free(&graph);
2259  graph = avfilter_graph_alloc();
2260  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2261  SDL_Event event;
2262  event.type = FF_QUIT_EVENT;
2263  event.user.data1 = is;
2264  SDL_PushEvent(&event);
2265  goto the_end;
2266  }
2267  filt_in = is->in_video_filter;
2268  filt_out = is->out_video_filter;
2269  last_w = frame->width;
2270  last_h = frame->height;
2271  last_format = frame->format;
2272  last_serial = is->viddec.pkt_serial;
2273  last_vfilter_idx = is->vfilter_idx;
2274  frame_rate = filt_out->inputs[0]->frame_rate;
2275  }
2276 
2277  ret = av_buffersrc_add_frame(filt_in, frame);
2278  if (ret < 0)
2279  goto the_end;
2280 
2281  while (ret >= 0) {
2282  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2283 
2284  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2285  if (ret < 0) {
2286  if (ret == AVERROR_EOF)
2287  is->viddec.finished = is->viddec.pkt_serial;
2288  ret = 0;
2289  break;
2290  }
2291 
2292  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2293  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2294  is->frame_last_filter_delay = 0;
2295  tb = filt_out->inputs[0]->time_base;
2296 #endif
2297  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
2298  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2299  ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
2300  av_frame_unref(frame);
2301 #if CONFIG_AVFILTER
2302  }
2303 #endif
2304 
2305  if (ret < 0)
2306  goto the_end;
2307  }
2308  the_end:
2309 #if CONFIG_AVFILTER
2310  avfilter_graph_free(&graph);
2311 #endif
2312  av_frame_free(&frame);
2313  return 0;
2314 }
2315 
2316 static int subtitle_thread(void *arg)
2317 {
2318  VideoState *is = arg;
2319  Frame *sp;
2320  int got_subtitle;
2321  double pts;
2322  int i, j;
2323  int r, g, b, y, u, v, a;
2324 
2325  for (;;) {
2326  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2327  return 0;
2328 
2329  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2330  break;
2331 
2332  pts = 0;
2333 
2334  if (got_subtitle && sp->sub.format == 0) {
2335  if (sp->sub.pts != AV_NOPTS_VALUE)
2336  pts = sp->sub.pts / (double)AV_TIME_BASE;
2337  sp->pts = pts;
2338  sp->serial = is->subdec.pkt_serial;
2339 
2340  for (i = 0; i < sp->sub.num_rects; i++)
2341  {
2342  for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
2343  {
2344  RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
2345  y = RGB_TO_Y_CCIR(r, g, b);
2346  u = RGB_TO_U_CCIR(r, g, b, 0);
2347  v = RGB_TO_V_CCIR(r, g, b, 0);
2348  YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
2349  }
2350  }
2351 
2352  /* now we can update the picture count */
2353  frame_queue_push(&is->subpq);
2354  } else if (got_subtitle) {
2355  avsubtitle_free(&sp->sub);
2356  }
2357  }
2358  return 0;
2359 }
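 /* Bitmap subtitles (sub.format == 0) arrive with an RGBA palette; each palette
  * entry is converted in place to YUVA with the CCIR coefficient macros so the
  * rectangles can later be blended onto the YUV overlay. */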
2360 
2361 /* copy decoded samples into sample_array for the waveform/RDFT display */
2362 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2363 {
2364  int size, len;
2365 
2366  size = samples_size / sizeof(short);
2367  while (size > 0) {
2368  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2369  if (len > size)
2370  len = size;
2371  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2372  samples += len;
2373  is->sample_array_index += len;
2374  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2375  is->sample_array_index = 0;
2376  size -= len;
2377  }
2378 }
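 /* sample_array is used as a circular buffer: copies wrap around once
  * sample_array_index reaches SAMPLE_ARRAY_SIZE, so the waveform/RDFT display
  * always sees the most recently played samples. */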
2379 
2380 /* return the wanted number of samples to get better sync if sync_type is video
2381  * or external master clock */
2382 static int synchronize_audio(VideoState *is, int nb_samples)
2383 {
2384  int wanted_nb_samples = nb_samples;
2385 
2386  /* if not master, then we try to remove or add samples to correct the clock */
2387  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2388  double diff, avg_diff;
2389  int min_nb_samples, max_nb_samples;
2390 
2391  diff = get_clock(&is->audclk) - get_master_clock(is);
2392 
2393  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2394  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2395  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2396  /* not enough measurements yet to have a reliable estimate */
2397  is->audio_diff_avg_count++;
2398  } else {
2399  /* estimate the A-V difference */
2400  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2401 
2402  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2403  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2404  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2405  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2406  wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
2407  }
2408  av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2409  diff, avg_diff, wanted_nb_samples - nb_samples,
2410  is->audio_clock, is->audio_diff_threshold);
2411  }
2412  } else {
2413  /* the difference is too large: it is probably due to initial PTS errors,
2414  so reset the A-V filter */
2415  is->audio_diff_avg_count = 0;
2416  is->audio_diff_cum = 0;
2417  }
2418  }
2419 
2420  return wanted_nb_samples;
2421 }
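 /* The A-V difference is smoothed with an exponential average:
  *   audio_diff_cum(n) = diff(n) + coef * audio_diff_cum(n-1)
  *   avg_diff          = audio_diff_cum * (1 - coef)
  * where coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so the last
  * AUDIO_DIFF_AVG_NB differences account for about 99% of the weight.
  * The resulting sample-count correction is clamped to
  * +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the frame size. */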
2422 
2423 /**
2424  * Decode one audio frame and return its uncompressed size.
2425  *
2426  * The processed audio frame is decoded, converted if required, and
2427  * stored in is->audio_buf, with size in bytes given by the return
2428  * value.
2429  */
2430 static int audio_decode_frame(VideoState *is)
2431 {
2432  int data_size, resampled_data_size;
2433  int64_t dec_channel_layout;
2434  av_unused double audio_clock0;
2435  int wanted_nb_samples;
2436  Frame *af;
2437 
2438  if (is->paused)
2439  return -1;
2440 
2441  do {
2442  if (!(af = frame_queue_peek_readable(&is->sampq)))
2443  return -1;
2444  frame_queue_next(&is->sampq);
2445  } while (af->serial != is->audioq.serial);
2446 
2447  data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(af->frame),
2448  af->frame->nb_samples,
2449  af->frame->format, 1);
2450 
2451  dec_channel_layout =
2452  (af->frame->channel_layout && av_frame_get_channels(af->frame) == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2453  af->frame->channel_layout : av_get_default_channel_layout(av_frame_get_channels(af->frame));
2454  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2455 
2456  if (af->frame->format != is->audio_src.fmt ||
2457  dec_channel_layout != is->audio_src.channel_layout ||
2458  af->frame->sample_rate != is->audio_src.freq ||
2459  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2460  swr_free(&is->swr_ctx);
2461  is->swr_ctx = swr_alloc_set_opts(NULL,
2462  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2463  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2464  0, NULL);
2465  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2466  av_log(NULL, AV_LOG_ERROR,
2467  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2468  af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), av_frame_get_channels(af->frame),
2469  is->audio_tgt.freq, av_get_sample_fmt_name(is->audio_tgt.fmt), is->audio_tgt.channels);
2470  swr_free(&is->swr_ctx);
2471  return -1;
2472  }
2473  is->audio_src.channel_layout = dec_channel_layout;
2474  is->audio_src.channels = av_frame_get_channels(af->frame);
2475  is->audio_src.freq = af->frame->sample_rate;
2476  is->audio_src.fmt = af->frame->format;
2477  }
2478 
2479  if (is->swr_ctx) {
2480  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2481  uint8_t **out = &is->audio_buf1;
2482  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2483  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2484  int len2;
2485  if (out_size < 0) {
2486  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2487  return -1;
2488  }
2489  if (wanted_nb_samples != af->frame->nb_samples) {
2490  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2491  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2492  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2493  return -1;
2494  }
2495  }
2496  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2497  if (!is->audio_buf1)
2498  return AVERROR(ENOMEM);
2499  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2500  if (len2 < 0) {
2501  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2502  return -1;
2503  }
2504  if (len2 == out_count) {
2505  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2506  if (swr_init(is->swr_ctx) < 0)
2507  swr_free(&is->swr_ctx);
2508  }
2509  is->audio_buf = is->audio_buf1;
2510  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2511  } else {
2512  is->audio_buf = af->frame->data[0];
2513  resampled_data_size = data_size;
2514  }
2515 
2516  audio_clock0 = is->audio_clock;
2517  /* update the audio clock with the pts */
2518  if (!isnan(af->pts))
2519  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2520  else
2521  is->audio_clock = NAN;
2522  is->audio_clock_serial = af->serial;
2523 #ifdef DEBUG
2524  {
2525  static double last_clock;
2526  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2527  is->audio_clock - last_clock,
2528  is->audio_clock, audio_clock0);
2529  last_clock = is->audio_clock;
2530  }
2531 #endif
2532  return resampled_data_size;
2533 }
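 /* The resampler (swr_ctx) is (re)created only when the decoded format, layout
  * or rate differs from audio_src, or when sample-count compensation is needed;
  * out_count adds 256 samples of headroom so swr_convert has room for the
  * samples buffered inside the resampler. */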
2534 
2535 /* prepare a new audio buffer */
2536 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2537 {
2538  VideoState *is = opaque;
2539  int audio_size, len1;
2540 
2541  audio_callback_time = av_gettime_relative();
2542 
2543  while (len > 0) {
2544  if (is->audio_buf_index >= is->audio_buf_size) {
2545  audio_size = audio_decode_frame(is);
2546  if (audio_size < 0) {
2547  /* if error, just output silence */
2548  is->audio_buf = is->silence_buf;
2549  is->audio_buf_size = sizeof(is->silence_buf) / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2550  } else {
2551  if (is->show_mode != SHOW_MODE_VIDEO)
2552  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2553  is->audio_buf_size = audio_size;
2554  }
2555  is->audio_buf_index = 0;
2556  }
2557  len1 = is->audio_buf_size - is->audio_buf_index;
2558  if (len1 > len)
2559  len1 = len;
2560  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2561  len -= len1;
2562  stream += len1;
2563  is->audio_buf_index += len1;
2564  }
2565  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2566  /* Let's assume the audio driver used by SDL has two periods. */
2567  if (!isnan(is->audio_clock)) {
2568  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2569  sync_clock_to_slave(&is->extclk, &is->audclk);
2570  }
2571 }
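 /* audio_clock holds the pts at the *end* of the decoded data; subtracting the
  * bytes still queued (two hardware buffers plus the unwritten part of
  * audio_buf) divided by bytes_per_sec gives the pts of the sample actually
  * being played, which is what set_clock_at() records for the audio clock. */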
2572 
2573 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2574 {
2575  SDL_AudioSpec wanted_spec, spec;
2576  const char *env;
2577  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2578  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2579  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2580 
2581  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2582  if (env) {
2583  wanted_nb_channels = atoi(env);
2584  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2585  }
2586  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2587  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2588  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2589  }
2590  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2591  wanted_spec.channels = wanted_nb_channels;
2592  wanted_spec.freq = wanted_sample_rate;
2593  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2594  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2595  return -1;
2596  }
2597  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2598  next_sample_rate_idx--;
2599  wanted_spec.format = AUDIO_S16SYS;
2600  wanted_spec.silence = 0;
2601  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
2602  wanted_spec.callback = sdl_audio_callback;
2603  wanted_spec.userdata = opaque;
2604  while (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2605  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2606  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2607  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2608  if (!wanted_spec.channels) {
2609  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2610  wanted_spec.channels = wanted_nb_channels;
2611  if (!wanted_spec.freq) {
2612  av_log(NULL, AV_LOG_ERROR,
2613  "No more combinations to try, audio open failed\n");
2614  return -1;
2615  }
2616  }
2617  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2618  }
2619  if (spec.format != AUDIO_S16SYS) {
2620  av_log(NULL, AV_LOG_ERROR,
2621  "SDL advised audio format %d is not supported!\n", spec.format);
2622  return -1;
2623  }
2624  if (spec.channels != wanted_spec.channels) {
2625  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2626  if (!wanted_channel_layout) {
2627  av_log(NULL, AV_LOG_ERROR,
2628  "SDL advised channel count %d is not supported!\n", spec.channels);
2629  return -1;
2630  }
2631  }
2632 
2633  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2634  audio_hw_params->freq = spec.freq;
2635  audio_hw_params->channel_layout = wanted_channel_layout;
2636  audio_hw_params->channels = spec.channels;
2637  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2638  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2639  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2640  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2641  return -1;
2642  }
2643  return spec.size;
2644 }
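 /* If SDL rejects the requested spec, the next_nb_channels table first walks
  * the channel count down (e.g. 6 -> 4 -> 2 -> 1) and, once that is exhausted,
  * next_sample_rates falls back to progressively lower standard rates before
  * giving up. */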
2645 
2646 /* open a given stream. Return 0 if OK */
2647 static int stream_component_open(VideoState *is, int stream_index)
2648 {
2649  AVFormatContext *ic = is->ic;
2650  AVCodecContext *avctx;
2651  AVCodec *codec;
2652  const char *forced_codec_name = NULL;
2653  AVDictionary *opts;
2654  AVDictionaryEntry *t = NULL;
2655  int sample_rate, nb_channels;
2656  int64_t channel_layout;
2657  int ret = 0;
2658  int stream_lowres = lowres;
2659 
2660  if (stream_index < 0 || stream_index >= ic->nb_streams)
2661  return -1;
2662  avctx = ic->streams[stream_index]->codec;
2663 
2664  codec = avcodec_find_decoder(avctx->codec_id);
2665 
2666  switch(avctx->codec_type){
2667  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2668  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2669  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2670  }
2671  if (forced_codec_name)
2672  codec = avcodec_find_decoder_by_name(forced_codec_name);
2673  if (!codec) {
2674  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2675  "No codec could be found with name '%s'\n", forced_codec_name);
2676  else av_log(NULL, AV_LOG_WARNING,
2677  "No codec could be found with id %d\n", avctx->codec_id);
2678  return -1;
2679  }
2680 
2681  avctx->codec_id = codec->id;
2682  if(stream_lowres > av_codec_get_max_lowres(codec)){
2683  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2684  av_codec_get_max_lowres(codec));
2685  stream_lowres = av_codec_get_max_lowres(codec);
2686  }
2687  av_codec_set_lowres(avctx, stream_lowres);
2688 
2689  if(stream_lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2690  if (fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2691  if(codec->capabilities & CODEC_CAP_DR1)
2692  avctx->flags |= CODEC_FLAG_EMU_EDGE;
2693 
2694  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2695  if (!av_dict_get(opts, "threads", NULL, 0))
2696  av_dict_set(&opts, "threads", "auto", 0);
2697  if (stream_lowres)
2698  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2699  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2700  av_dict_set(&opts, "refcounted_frames", "1", 0);
2701  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2702  goto fail;
2703  }
2704  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2705  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2706  ret = AVERROR_OPTION_NOT_FOUND;
2707  goto fail;
2708  }
2709 
2710  is->eof = 0;
2711  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2712  switch (avctx->codec_type) {
2713  case AVMEDIA_TYPE_AUDIO:
2714 #if CONFIG_AVFILTER
2715  {
2716  AVFilterLink *link;
2717 
2718  is->audio_filter_src.freq = avctx->sample_rate;
2719  is->audio_filter_src.channels = avctx->channels;
2720  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2721  is->audio_filter_src.fmt = avctx->sample_fmt;
2722  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2723  goto fail;
2724  link = is->out_audio_filter->inputs[0];
2725  sample_rate = link->sample_rate;
2726  nb_channels = link->channels;
2727  channel_layout = link->channel_layout;
2728  }
2729 #else
2730  sample_rate = avctx->sample_rate;
2731  nb_channels = avctx->channels;
2732  channel_layout = avctx->channel_layout;
2733 #endif
2734 
2735  /* prepare audio output */
2736  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2737  goto fail;
2738  is->audio_hw_buf_size = ret;
2739  is->audio_src = is->audio_tgt;
2740  is->audio_buf_size = 0;
2741  is->audio_buf_index = 0;
2742 
2743  /* init averaging filter */
2744  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2745  is->audio_diff_avg_count = 0;
2746  /* since we do not have a precise enough audio FIFO fullness,
2747  we correct audio sync only if larger than this threshold */
2748  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2749 
2750  is->audio_stream = stream_index;
2751  is->audio_st = ic->streams[stream_index];
2752 
2753  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2754  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2755  is->auddec.start_pts = is->audio_st->start_time;
2756  is->auddec.start_pts_tb = is->audio_st->time_base;
2757  }
2758  decoder_start(&is->auddec, audio_thread, is);
2759  SDL_PauseAudio(0);
2760  break;
2761  case AVMEDIA_TYPE_VIDEO:
2762  is->video_stream = stream_index;
2763  is->video_st = ic->streams[stream_index];
2764 
2765  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2766  decoder_start(&is->viddec, video_thread, is);
2767  is->queue_attachments_req = 1;
2768  break;
2769  case AVMEDIA_TYPE_SUBTITLE:
2770  is->subtitle_stream = stream_index;
2771  is->subtitle_st = ic->streams[stream_index];
2772 
2773  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2774  decoder_start(&is->subdec, subtitle_thread, is);
2775  break;
2776  default:
2777  break;
2778  }
2779 
2780 fail:
2781  av_dict_free(&opts);
2782 
2783  return ret;
2784 }
2785 
2786 static void stream_component_close(VideoState *is, int stream_index)
2787 {
2788  AVFormatContext *ic = is->ic;
2789  AVCodecContext *avctx;
2790 
2791  if (stream_index < 0 || stream_index >= ic->nb_streams)
2792  return;
2793  avctx = ic->streams[stream_index]->codec;
2794 
2795  switch (avctx->codec_type) {
2796  case AVMEDIA_TYPE_AUDIO:
2797  decoder_abort(&is->auddec, &is->sampq);
2798  SDL_CloseAudio();
2799  decoder_destroy(&is->auddec);
2800  swr_free(&is->swr_ctx);
2801  av_freep(&is->audio_buf1);
2802  is->audio_buf1_size = 0;
2803  is->audio_buf = NULL;
2804 
2805  if (is->rdft) {
2806  av_rdft_end(is->rdft);
2807  av_freep(&is->rdft_data);
2808  is->rdft = NULL;
2809  is->rdft_bits = 0;
2810  }
2811  break;
2812  case AVMEDIA_TYPE_VIDEO:
2813  decoder_abort(&is->viddec, &is->pictq);
2814  decoder_destroy(&is->viddec);
2815  break;
2816  case AVMEDIA_TYPE_SUBTITLE:
2817  decoder_abort(&is->subdec, &is->subpq);
2818  decoder_destroy(&is->subdec);
2819  break;
2820  default:
2821  break;
2822  }
2823 
2824  ic->streams[stream_index]->discard = AVDISCARD_ALL;
2825  avcodec_close(avctx);
2826  switch (avctx->codec_type) {
2827  case AVMEDIA_TYPE_AUDIO:
2828  is->audio_st = NULL;
2829  is->audio_stream = -1;
2830  break;
2831  case AVMEDIA_TYPE_VIDEO:
2832  is->video_st = NULL;
2833  is->video_stream = -1;
2834  break;
2835  case AVMEDIA_TYPE_SUBTITLE:
2836  is->subtitle_st = NULL;
2837  is->subtitle_stream = -1;
2838  break;
2839  default:
2840  break;
2841  }
2842 }
2843 
2844 static int decode_interrupt_cb(void *ctx)
2845 {
2846  VideoState *is = ctx;
2847  return is->abort_request;
2848 }
2849 
2850 static int is_realtime(AVFormatContext *s)
2851 {
2852  if( !strcmp(s->iformat->name, "rtp")
2853  || !strcmp(s->iformat->name, "rtsp")
2854  || !strcmp(s->iformat->name, "sdp")
2855  )
2856  return 1;
2857 
2858  if(s->pb && ( !strncmp(s->filename, "rtp:", 4)
2859  || !strncmp(s->filename, "udp:", 4)
2860  )
2861  )
2862  return 1;
2863  return 0;
2864 }
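 /* Inputs detected as realtime (RTP/RTSP/SDP formats or rtp:/udp: URLs) get
  * is->realtime set, which in turn enables the unbounded packet queue
  * (infinite_buffer) in the read loop below. */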
2865 
2866 /* this thread gets the stream from the disk or the network */
2867 static int read_thread(void *arg)
2868 {
2869  VideoState *is = arg;
2870  AVFormatContext *ic = NULL;
2871  int err, i, ret;
2872  int st_index[AVMEDIA_TYPE_NB];
2873  AVPacket pkt1, *pkt = &pkt1;
2874  int64_t stream_start_time;
2875  int pkt_in_play_range = 0;
2876  AVDictionaryEntry *t;
2877  AVDictionary **opts;
2878  int orig_nb_streams;
2879  SDL_mutex *wait_mutex = SDL_CreateMutex();
2880  int scan_all_pmts_set = 0;
2881  int64_t pkt_ts;
2882 
2883  memset(st_index, -1, sizeof(st_index));
2884  is->last_video_stream = is->video_stream = -1;
2885  is->last_audio_stream = is->audio_stream = -1;
2886  is->last_subtitle_stream = is->subtitle_stream = -1;
2887  is->eof = 0;
2888 
2889  ic = avformat_alloc_context();
2890  if (!ic) {
2891  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2892  ret = AVERROR(ENOMEM);
2893  goto fail;
2894  }
2895  ic->interrupt_callback.callback = decode_interrupt_cb;
2896  ic->interrupt_callback.opaque = is;
2897  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2898  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2899  scan_all_pmts_set = 1;
2900  }
2901  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2902  if (err < 0) {
2903  print_error(is->filename, err);
2904  ret = -1;
2905  goto fail;
2906  }
2907  if (scan_all_pmts_set)
2908  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2909 
2910  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2911  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2912  ret = AVERROR_OPTION_NOT_FOUND;
2913  goto fail;
2914  }
2915  is->ic = ic;
2916 
2917  if (genpts)
2918  ic->flags |= AVFMT_FLAG_GENPTS;
2919 
2920  av_format_inject_global_side_data(ic);
2921 
2922  opts = setup_find_stream_info_opts(ic, codec_opts);
2923  orig_nb_streams = ic->nb_streams;
2924 
2925  err = avformat_find_stream_info(ic, opts);
2926 
2927  for (i = 0; i < orig_nb_streams; i++)
2928  av_dict_free(&opts[i]);
2929  av_freep(&opts);
2930 
2931  if (err < 0) {
2932  av_log(NULL, AV_LOG_WARNING,
2933  "%s: could not find codec parameters\n", is->filename);
2934  ret = -1;
2935  goto fail;
2936  }
2937 
2938  if (ic->pb)
2939  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2940 
2941  if (seek_by_bytes < 0)
2942  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2943 
2944  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2945 
2946  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2947  window_title = av_asprintf("%s - %s", t->value, input_filename);
2948 
2949  /* if seeking requested, we execute it */
2950  if (start_time != AV_NOPTS_VALUE) {
2951  int64_t timestamp;
2952 
2953  timestamp = start_time;
2954  /* add the stream start time */
2955  if (ic->start_time != AV_NOPTS_VALUE)
2956  timestamp += ic->start_time;
2957  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2958  if (ret < 0) {
2959  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2960  is->filename, (double)timestamp / AV_TIME_BASE);
2961  }
2962  }
2963 
2964  is->realtime = is_realtime(ic);
2965 
2966  if (show_status)
2967  av_dump_format(ic, 0, is->filename, 0);
2968 
2969  for (i = 0; i < ic->nb_streams; i++) {
2970  AVStream *st = ic->streams[i];
2971  enum AVMediaType type = st->codec->codec_type;
2972  st->discard = AVDISCARD_ALL;
2973  if (wanted_stream_spec[type] && st_index[type] == -1)
2974  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2975  st_index[type] = i;
2976  }
2977  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2978  if (wanted_stream_spec[i] && st_index[i] == -1) {
2979  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2980  st_index[i] = INT_MAX;
2981  }
2982  }
2983 
2984  if (!video_disable)
2985  st_index[AVMEDIA_TYPE_VIDEO] =
2986  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2987  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2988  if (!audio_disable)
2989  st_index[AVMEDIA_TYPE_AUDIO] =
2990  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2991  st_index[AVMEDIA_TYPE_AUDIO],
2992  st_index[AVMEDIA_TYPE_VIDEO],
2993  NULL, 0);
2994  if (!video_disable && !subtitle_disable)
2995  st_index[AVMEDIA_TYPE_SUBTITLE] =
2996  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2997  st_index[AVMEDIA_TYPE_SUBTITLE],
2998  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2999  st_index[AVMEDIA_TYPE_AUDIO] :
3000  st_index[AVMEDIA_TYPE_VIDEO]),
3001  NULL, 0);
3002 
3003  is->show_mode = show_mode;
3004  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3005  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
3006  AVCodecContext *avctx = st->codec;
3007  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
3008  if (avctx->width)
3009  set_default_window_size(avctx->width, avctx->height, sar);
3010  }
3011 
3012  /* open the streams */
3013  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
3014  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
3015  }
3016 
3017  ret = -1;
3018  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
3019  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
3020  }
3021  if (is->show_mode == SHOW_MODE_NONE)
3022  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
3023 
3024  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
3025  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
3026  }
3027 
3028  if (is->video_stream < 0 && is->audio_stream < 0) {
3029  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
3030  is->filename);
3031  ret = -1;
3032  goto fail;
3033  }
3034 
3035  if (infinite_buffer < 0 && is->realtime)
3036  infinite_buffer = 1;
3037 
3038  for (;;) {
3039  if (is->abort_request)
3040  break;
3041  if (is->paused != is->last_paused) {
3042  is->last_paused = is->paused;
3043  if (is->paused)
3044  is->read_pause_return = av_read_pause(ic);
3045  else
3046  av_read_play(ic);
3047  }
3048 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
3049  if (is->paused &&
3050  (!strcmp(ic->iformat->name, "rtsp") ||
3051  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
3052  /* wait 10 ms to avoid trying to get another packet */
3053  /* XXX: horrible */
3054  SDL_Delay(10);
3055  continue;
3056  }
3057 #endif
3058  if (is->seek_req) {
3059  int64_t seek_target = is->seek_pos;
3060  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
3061  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
3062 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
3063 // of the seek_pos/seek_rel variables
3064 
3065  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
3066  if (ret < 0) {
3067  av_log(NULL, AV_LOG_ERROR,
3068  "%s: error while seeking\n", is->ic->filename);
3069  } else {
3070  if (is->audio_stream >= 0) {
3071  packet_queue_flush(&is->audioq);
3072  packet_queue_put(&is->audioq, &flush_pkt);
3073  }
3074  if (is->subtitle_stream >= 0) {
3075  packet_queue_flush(&is->subtitleq);
3076  packet_queue_put(&is->subtitleq, &flush_pkt);
3077  }
3078  if (is->video_stream >= 0) {
3079  packet_queue_flush(&is->videoq);
3080  packet_queue_put(&is->videoq, &flush_pkt);
3081  }
3082  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
3083  set_clock(&is->extclk, NAN, 0);
3084  } else {
3085  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
3086  }
3087  }
3088  is->seek_req = 0;
3089  is->queue_attachments_req = 1;
3090  is->eof = 0;
3091  if (is->paused)
3092  step_to_next_frame(is);
3093  }
3094  if (is->queue_attachments_req) {
3095  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
3096  AVPacket copy;
3097  if ((ret = av_copy_packet(&copy, &is->video_st->attached_pic)) < 0)
3098  goto fail;
3099  packet_queue_put(&is->videoq, &copy);
3100  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3101  }
3102  is->queue_attachments_req = 0;
3103  }
3104 
3105  /* if the queues are full, no need to read more */
3106  if (infinite_buffer<1 &&
3107  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
3108  || ( (is->audioq .nb_packets > MIN_FRAMES || is->audio_stream < 0 || is->audioq.abort_request)
3109  && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream < 0 || is->videoq.abort_request
3110  || (is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC))
3111  && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0 || is->subtitleq.abort_request)))) {
3112  /* wait 10 ms */
3113  SDL_LockMutex(wait_mutex);
3114  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3115  SDL_UnlockMutex(wait_mutex);
3116  continue;
3117  }
3118  if (!is->paused &&
3119  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
3120  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
3121  if (loop != 1 && (!loop || --loop)) {
3122  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
3123  } else if (autoexit) {
3124  ret = AVERROR_EOF;
3125  goto fail;
3126  }
3127  }
3128  ret = av_read_frame(ic, pkt);
3129  if (ret < 0) {
3130  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
3131  if (is->video_stream >= 0)
3132  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
3133  if (is->audio_stream >= 0)
3134  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
3135  if (is->subtitle_stream >= 0)
3136  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
3137  is->eof = 1;
3138  }
3139  if (ic->pb && ic->pb->error)
3140  break;
3141  SDL_LockMutex(wait_mutex);
3142  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3143  SDL_UnlockMutex(wait_mutex);
3144  continue;
3145  } else {
3146  is->eof = 0;
3147  }
3148  /* check if packet is in play range specified by user, then queue, otherwise discard */
3149  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3150  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3151  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3152  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3153  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3154  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3155  <= ((double)duration / 1000000);
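 /* A packet is "in play range" when its timestamp, rescaled to seconds and
  * taken relative to the stream start, minus the -ss start offset does not
  * exceed the -t duration (start_time and duration are stored in
  * microseconds, hence the divisions by 1000000). */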
3156  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3157  packet_queue_put(&is->audioq, pkt);
3158  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3159  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3160  packet_queue_put(&is->videoq, pkt);
3161  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3162  packet_queue_put(&is->subtitleq, pkt);
3163  } else {
3164  av_free_packet(pkt);
3165  }
3166  }
3167  /* wait until the end */
3168  while (!is->abort_request) {
3169  SDL_Delay(100);
3170  }
3171 
3172  ret = 0;
3173  fail:
3174  /* close each stream */
3175  if (is->audio_stream >= 0)
3176  stream_component_close(is, is->audio_stream);
3177  if (is->video_stream >= 0)
3178  stream_component_close(is, is->video_stream);
3179  if (is->subtitle_stream >= 0)
3180  stream_component_close(is, is->subtitle_stream);
3181  if (ic) {
3182  avformat_close_input(&ic);
3183  is->ic = NULL;
3184  }
3185 
3186  if (ret != 0) {
3187  SDL_Event event;
3188 
3189  event.type = FF_QUIT_EVENT;
3190  event.user.data1 = is;
3191  SDL_PushEvent(&event);
3192  }
3193  SDL_DestroyMutex(wait_mutex);
3194  return 0;
3195 }
3196 
3197 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3198 {
3199  VideoState *is;
3200 
3201  is = av_mallocz(sizeof(VideoState));
3202  if (!is)
3203  return NULL;
3204  av_strlcpy(is->filename, filename, sizeof(is->filename));
3205  is->iformat = iformat;
3206  is->ytop = 0;
3207  is->xleft = 0;
3208 
3209  /* start video display */
3210  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3211  goto fail;
3212  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3213  goto fail;
3214  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3215  goto fail;
3216 
3217  packet_queue_init(&is->videoq);
3218  packet_queue_init(&is->audioq);
3219  packet_queue_init(&is->subtitleq);
3220 
3221  is->continue_read_thread = SDL_CreateCond();
3222 
3223  init_clock(&is->vidclk, &is->videoq.serial);
3224  init_clock(&is->audclk, &is->audioq.serial);
3225  init_clock(&is->extclk, &is->extclk.serial);
3226  is->audio_clock_serial = -1;
3227  is->av_sync_type = av_sync_type;
3228  is->read_tid = SDL_CreateThread(read_thread, is);
3229  if (!is->read_tid) {
3230 fail:
3231  stream_close(is);
3232  return NULL;
3233  }
3234  return is;
3235 }
3236 
3237 static void stream_cycle_channel(VideoState *is, int codec_type)
3238 {
3239  AVFormatContext *ic = is->ic;
3240  int start_index, stream_index;
3241  int old_index;
3242  AVStream *st;
3243  AVProgram *p = NULL;
3244  int nb_streams = is->ic->nb_streams;
3245 
3246  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3247  start_index = is->last_video_stream;
3248  old_index = is->video_stream;
3249  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3250  start_index = is->last_audio_stream;
3251  old_index = is->audio_stream;
3252  } else {
3253  start_index = is->last_subtitle_stream;
3254  old_index = is->subtitle_stream;
3255  }
3256  stream_index = start_index;
3257 
3258  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3259  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3260  if (p) {
3261  nb_streams = p->nb_stream_indexes;
3262  for (start_index = 0; start_index < nb_streams; start_index++)
3263  if (p->stream_index[start_index] == stream_index)
3264  break;
3265  if (start_index == nb_streams)
3266  start_index = -1;
3267  stream_index = start_index;
3268  }
3269  }
3270 
3271  for (;;) {
3272  if (++stream_index >= nb_streams)
3273  {
3274  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3275  {
3276  stream_index = -1;
3277  is->last_subtitle_stream = -1;
3278  goto the_end;
3279  }
3280  if (start_index == -1)
3281  return;
3282  stream_index = 0;
3283  }
3284  if (stream_index == start_index)
3285  return;
3286  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3287  if (st->codec->codec_type == codec_type) {
3288  /* check that parameters are OK */
3289  switch (codec_type) {
3290  case AVMEDIA_TYPE_AUDIO:
3291  if (st->codec->sample_rate != 0 &&
3292  st->codec->channels != 0)
3293  goto the_end;
3294  break;
3295  case AVMEDIA_TYPE_VIDEO:
3296  case AVMEDIA_TYPE_SUBTITLE:
3297  goto the_end;
3298  default:
3299  break;
3300  }
3301  }
3302  }
3303  the_end:
3304  if (p && stream_index != -1)
3305  stream_index = p->stream_index[stream_index];
3306  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3307  av_get_media_type_string(codec_type),
3308  old_index,
3309  stream_index);
3310 
3311  stream_component_close(is, old_index);
3312  stream_component_open(is, stream_index);
3313 }
3314 
3315 
3316 static void toggle_full_screen(VideoState *is)
3317 {
3318 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
3319  /* OS X needs to reallocate the SDL overlays */
3320  int i;
3321  for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++)
3322  is->pictq.queue[i].reallocate = 1;
3323 #endif
3324  is_full_screen = !is_full_screen;
3325  video_open(is, 1, NULL);
3326 }
3327 
3328 static void toggle_audio_display(VideoState *is)
3329 {
3330  int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
3331  int next = is->show_mode;
3332  do {
3333  next = (next + 1) % SHOW_MODE_NB;
3334  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3335  if (is->show_mode != next) {
3336  fill_rectangle(screen,
3337  is->xleft, is->ytop, is->width, is->height,
3338  bgcolor, 1);
3339  is->force_refresh = 1;
3340  is->show_mode = next;
3341  }
3342 }
3343 
3344 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3345  double remaining_time = 0.0;
3346  SDL_PumpEvents();
3347  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_ALLEVENTS)) {
3348  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3349  SDL_ShowCursor(0);
3350  cursor_hidden = 1;
3351  }
3352  if (remaining_time > 0.0)
3353  av_usleep((int64_t)(remaining_time * 1000000.0));
3354  remaining_time = REFRESH_RATE;
3355  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3356  video_refresh(is, &remaining_time);
3357  SDL_PumpEvents();
3358  }
3359 }
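 /* This loop drives the display: between SDL event polls it sleeps for at most
  * remaining_time (initially REFRESH_RATE) and calls video_refresh(), which in
  * turn tells it how long it may sleep before the next frame is due; the mouse
  * cursor is hidden after CURSOR_HIDE_DELAY of inactivity. */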
3360 
3361 static void seek_chapter(VideoState *is, int incr)
3362 {
3363  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3364  int i;
3365 
3366  if (!is->ic->nb_chapters)
3367  return;
3368 
3369  /* find the current chapter */
3370  for (i = 0; i < is->ic->nb_chapters; i++) {
3371  AVChapter *ch = is->ic->chapters[i];
3372  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3373  i--;
3374  break;
3375  }
3376  }
3377 
3378  i += incr;
3379  i = FFMAX(i, 0);
3380  if (i >= is->ic->nb_chapters)
3381  return;
3382 
3383  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3384  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3385  AV_TIME_BASE_Q), 0, 0);
3386 }
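 /* The current chapter is found by comparing the master clock (rescaled to
  * AV_TIME_BASE) against each chapter's start time; the target index is then
  * stepped by incr, clamped at 0, and its start converted back with
  * av_rescale_q() before the actual seek. */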
3387 
3388 /* handle an event sent by the GUI */
3389 static void event_loop(VideoState *cur_stream)
3390 {
3391  SDL_Event event;
3392  double incr, pos, frac;
3393 
3394  for (;;) {
3395  double x;
3396  refresh_loop_wait_event(cur_stream, &event);
3397  switch (event.type) {
3398  case SDL_KEYDOWN:
3399  if (exit_on_keydown) {
3400  do_exit(cur_stream);
3401  break;
3402  }
3403  switch (event.key.keysym.sym) {
3404  case SDLK_ESCAPE:
3405  case SDLK_q:
3406  do_exit(cur_stream);
3407  break;
3408  case SDLK_f:
3409  toggle_full_screen(cur_stream);
3410  cur_stream->force_refresh = 1;
3411  break;
3412  case SDLK_p:
3413  case SDLK_SPACE:
3414  toggle_pause(cur_stream);
3415  break;
3416  case SDLK_s: // S: Step to next frame
3417  step_to_next_frame(cur_stream);
3418  break;
3419  case SDLK_a:
3420  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3421  break;
3422  case SDLK_v:
3423  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3424  break;
3425  case SDLK_c:
3426  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3427  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3428  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3429  break;
3430  case SDLK_t:
3431  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3432  break;
3433  case SDLK_w:
3434 #if CONFIG_AVFILTER
3435  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3436  if (++cur_stream->vfilter_idx >= nb_vfilters)
3437  cur_stream->vfilter_idx = 0;
3438  } else {
3439  cur_stream->vfilter_idx = 0;
3440  toggle_audio_display(cur_stream);
3441  }
3442 #else
3443  toggle_audio_display(cur_stream);
3444 #endif
3445  break;
3446  case SDLK_PAGEUP:
3447  if (cur_stream->ic->nb_chapters <= 1) {
3448  incr = 600.0;
3449  goto do_seek;
3450  }
3451  seek_chapter(cur_stream, 1);
3452  break;
3453  case SDLK_PAGEDOWN:
3454  if (cur_stream->ic->nb_chapters <= 1) {
3455  incr = -600.0;
3456  goto do_seek;
3457  }
3458  seek_chapter(cur_stream, -1);
3459  break;
3460  case SDLK_LEFT:
3461  incr = -10.0;
3462  goto do_seek;
3463  case SDLK_RIGHT:
3464  incr = 10.0;
3465  goto do_seek;
3466  case SDLK_UP:
3467  incr = 60.0;
3468  goto do_seek;
3469  case SDLK_DOWN:
3470  incr = -60.0;
3471  do_seek:
3472  if (seek_by_bytes) {
3473  pos = -1;
3474  if (pos < 0 && cur_stream->video_stream >= 0)
3475  pos = frame_queue_last_pos(&cur_stream->pictq);
3476  if (pos < 0 && cur_stream->audio_stream >= 0)
3477  pos = frame_queue_last_pos(&cur_stream->sampq);
3478  if (pos < 0)
3479  pos = avio_tell(cur_stream->ic->pb);
3480  if (cur_stream->ic->bit_rate)
3481  incr *= cur_stream->ic->bit_rate / 8.0;
3482  else
3483  incr *= 180000.0;
3484  pos += incr;
3485  stream_seek(cur_stream, pos, incr, 1);
3486  } else {
3487  pos = get_master_clock(cur_stream);
3488  if (isnan(pos))
3489  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3490  pos += incr;
3491  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3492  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3493  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3494  }
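 /* Byte seeks estimate the step from the stream bitrate (incr seconds *
  * bit_rate/8 bytes per second, or 180000 bytes/s as a fallback); time seeks
  * simply offset the master clock position, clamped to the container's
  * start time. */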
3495  break;
3496  default:
3497  break;
3498  }
3499  break;
3500  case SDL_VIDEOEXPOSE:
3501  cur_stream->force_refresh = 1;
3502  break;
3503  case SDL_MOUSEBUTTONDOWN:
3504  if (exit_on_mousedown) {
3505  do_exit(cur_stream);
3506  break;
3507  }
3508  case SDL_MOUSEMOTION:
3509  if (cursor_hidden) {
3510  SDL_ShowCursor(1);
3511  cursor_hidden = 0;
3512  }
3513  cursor_last_shown = av_gettime_relative();
3514  if (event.type == SDL_MOUSEBUTTONDOWN) {
3515  x = event.button.x;
3516  } else {
3517  if (event.motion.state != SDL_PRESSED)
3518  break;
3519  x = event.motion.x;
3520  }
3521  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3522  uint64_t size = avio_size(cur_stream->ic->pb);
3523  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3524  } else {
3525  int64_t ts;
3526  int ns, hh, mm, ss;
3527  int tns, thh, tmm, tss;
3528  tns = cur_stream->ic->duration / 1000000LL;
3529  thh = tns / 3600;
3530  tmm = (tns % 3600) / 60;
3531  tss = (tns % 60);
3532  frac = x / cur_stream->width;
3533  ns = frac * tns;
3534  hh = ns / 3600;
3535  mm = (ns % 3600) / 60;
3536  ss = (ns % 60);
3537  av_log(NULL, AV_LOG_INFO,
3538  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3539  hh, mm, ss, thh, tmm, tss);
3540  ts = frac * cur_stream->ic->duration;
3541  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3542  ts += cur_stream->ic->start_time;
3543  stream_seek(cur_stream, ts, 0, 0);
3544  }
3545  break;
3546  case SDL_VIDEORESIZE:
3547  screen = SDL_SetVideoMode(FFMIN(16383, event.resize.w), event.resize.h, 0,
3548  SDL_HWSURFACE|(is_full_screen?SDL_FULLSCREEN:SDL_RESIZABLE)|SDL_ASYNCBLIT|SDL_HWACCEL);
3549  if (!screen) {
3550  av_log(NULL, AV_LOG_FATAL, "Failed to set video mode\n");
3551  do_exit(cur_stream);
3552  }
3553  screen_width = cur_stream->width = screen->w;
3554  screen_height = cur_stream->height = screen->h;
3555  cur_stream->force_refresh = 1;
3556  break;
3557  case SDL_QUIT:
3558  case FF_QUIT_EVENT:
3559  do_exit(cur_stream);
3560  break;
3561  case FF_ALLOC_EVENT:
3562  alloc_picture(event.user.data1);
3563  break;
3564  default:
3565  break;
3566  }
3567  }
3568 }
3569 
3570 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3571 {
3572  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3573  return opt_default(NULL, "video_size", arg);
3574 }
3575 
3576 static int opt_width(void *optctx, const char *opt, const char *arg)
3577 {
3578  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3579  return 0;
3580 }
3581 
3582 static int opt_height(void *optctx, const char *opt, const char *arg)
3583 {
3584  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3585  return 0;
3586 }
3587 
3588 static int opt_format(void *optctx, const char *opt, const char *arg)
3589 {
3590  file_iformat = av_find_input_format(arg);
3591  if (!file_iformat) {
3592  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3593  return AVERROR(EINVAL);
3594  }
3595  return 0;
3596 }
3597 
3598 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3599 {
3600  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3601  return opt_default(NULL, "pixel_format", arg);
3602 }
3603 
3604 static int opt_sync(void *optctx, const char *opt, const char *arg)
3605 {
3606  if (!strcmp(arg, "audio"))
3607  av_sync_type = AV_SYNC_AUDIO_MASTER;
3608  else if (!strcmp(arg, "video"))
3609  av_sync_type = AV_SYNC_VIDEO_MASTER;
3610  else if (!strcmp(arg, "ext"))
3611  av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3612  else {
3613  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3614  exit(1);
3615  }
3616  return 0;
3617 }
3618 
3619 static int opt_seek(void *optctx, const char *opt, const char *arg)
3620 {
3621  start_time = parse_time_or_die(opt, arg, 1);
3622  return 0;
3623 }
3624 
3625 static int opt_duration(void *optctx, const char *opt, const char *arg)
3626 {
3627  duration = parse_time_or_die(opt, arg, 1);
3628  return 0;
3629 }
3630 
3631 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3632 {
3633  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3634  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3635  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3636  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3637  return 0;
3638 }
3639 
3640 static void opt_input_file(void *optctx, const char *filename)
3641 {
3642  if (input_filename) {
3643  av_log(NULL, AV_LOG_FATAL,
3644  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3645  filename, input_filename);
3646  exit(1);
3647  }
3648  if (!strcmp(filename, "-"))
3649  filename = "pipe:";
3650  input_filename = filename;
3651 }
3652 
3653 static int opt_codec(void *optctx, const char *opt, const char *arg)
3654 {
3655  const char *spec = strchr(opt, ':');
3656  if (!spec) {
3657  av_log(NULL, AV_LOG_ERROR,
3658  "No media specifier was specified in '%s' in option '%s'\n",
3659  arg, opt);
3660  return AVERROR(EINVAL);
3661  }
3662  spec++;
3663  switch (spec[0]) {
3664  case 'a' : audio_codec_name = arg; break;
3665  case 's' : subtitle_codec_name = arg; break;
3666  case 'v' : video_codec_name = arg; break;
3667  default:
3668  av_log(NULL, AV_LOG_ERROR,
3669  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3670  return AVERROR(EINVAL);
3671  }
3672  return 0;
3673 }
3674 
3675 static int dummy;
3676 
3677 static const OptionDef options[] = {
3678 #include "cmdutils_common_opts.h"
3679  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3680  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3681  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3682  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3683  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3684  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3685  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3686  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3687  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3688  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3689  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3690  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3691  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3692  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3693  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3694  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3695  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3696  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3697  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3698  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3699  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3700  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3701  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3702  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3703  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3704  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3705  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3706  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3707  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3708 #if CONFIG_AVFILTER
3709  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3710  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3711 #endif
3712  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3713  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3714  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3715  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3716  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3717  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3718  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3719  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3720  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3721  { NULL, },
3722 };
3723 
3724 static void show_usage(void)
3725 {
3726  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3727  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3728  av_log(NULL, AV_LOG_INFO, "\n");
3729 }
3730 
3731 void show_help_default(const char *opt, const char *arg)
3732 {
3733  av_log_set_callback(log_callback_help);
3734  show_usage();
3735  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3736  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3737  printf("\n");
3738  show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3739  show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
3740 #if !CONFIG_AVFILTER
3741  show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
3742 #else
3743  show_help_children(avfilter_get_class(), AV_OPT_FLAG_FILTERING_PARAM);
3744 #endif
3745  printf("\nWhile playing:\n"
3746  "q, ESC quit\n"
3747  "f toggle full screen\n"
3748  "p, SPC pause\n"
3749  "a cycle audio channel in the current program\n"
3750  "v cycle video channel\n"
3751  "t cycle subtitle channel in the current program\n"
3752  "c cycle program\n"
3753  "w cycle video filters or show modes\n"
3754  "s activate frame-step mode\n"
3755  "left/right seek backward/forward 10 seconds\n"
3756  "down/up seek backward/forward 1 minute\n"
3757  "page down/page up seek backward/forward 10 minutes\n"
3758  "mouse click seek to percentage in file corresponding to fraction of width\n"
3759  );
3760 }
3761 
3762 static int lockmgr(void **mtx, enum AVLockOp op)
3763 {
3764  switch(op) {
3765  case AV_LOCK_CREATE:
3766  *mtx = SDL_CreateMutex();
3767  if(!*mtx)
3768  return 1;
3769  return 0;
3770  case AV_LOCK_OBTAIN:
3771  return !!SDL_LockMutex(*mtx);
3772  case AV_LOCK_RELEASE:
3773  return !!SDL_UnlockMutex(*mtx);
3774  case AV_LOCK_DESTROY:
3775  SDL_DestroyMutex(*mtx);
3776  return 0;
3777  }
3778  return 1;
3779 }
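 /* This callback is handed to av_lockmgr_register() in main() so that the
  * FFmpeg libraries use SDL mutexes for their internal locking. */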
3780 
3781 /* Called from the main */
3782 int main(int argc, char **argv)
3783 {
3784  int flags;
3785  VideoState *is;
3786  char dummy_videodriver[] = "SDL_VIDEODRIVER=dummy";
3787 
3788  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3789  parse_loglevel(argc, argv, options);
3790 
3791  /* register all codecs, demuxers, and protocols */
3792 #if CONFIG_AVDEVICE
3793  avdevice_register_all();
3794 #endif
3795 #if CONFIG_AVFILTER
3796  avfilter_register_all();
3797 #endif
3798  av_register_all();
3799  avformat_network_init();
3800 
3801  init_opts();
3802 
3803  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3804  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3805 
3806  show_banner(argc, argv, options);
3807 
3808  parse_options(NULL, argc, argv, options, opt_input_file);
3809 
3810  if (!input_filename) {
3811  show_usage();
3812  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3813  av_log(NULL, AV_LOG_FATAL,
3814  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3815  exit(1);
3816  }
3817 
3818  if (display_disable) {
3819  video_disable = 1;
3820  }
3821  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3822  if (audio_disable)
3823  flags &= ~SDL_INIT_AUDIO;
3824  if (display_disable)
3825  SDL_putenv(dummy_videodriver); /* For the event queue, we always need a video driver. */
3826 #if !defined(_WIN32) && !defined(__APPLE__)
3827  flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3828 #endif
3829  if (SDL_Init (flags)) {
3830  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3831  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3832  exit(1);
3833  }
3834 
3835  if (!display_disable) {
3836  const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3837  fs_screen_width = vi->current_w;
3838  fs_screen_height = vi->current_h;
3839  }
3840 
3841  SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3842  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3843  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3844 
3845  if (av_lockmgr_register(lockmgr)) {
3846  av_log(NULL, AV_LOG_FATAL, "Could not initialize lock manager!\n");
3847  do_exit(NULL);
3848  }
3849 
3850  av_init_packet(&flush_pkt);
3851  flush_pkt.data = (uint8_t *)&flush_pkt;
3852 
3853  is = stream_open(input_filename, file_iformat);
3854  if (!is) {
3855  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3856  do_exit(NULL);
3857  }
3858 
3859  event_loop(is);
3860 
3861  /* never returns */
3862 
3863  return 0;
3864 }
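For readers following the control flow: main() hands off to stream_open(), whose read_thread() opens the container and selects streams via libavformat. The sketch below is an illustration written for this page, not code from ffplay.c (the helper name open_best_streams() is hypothetical), with error handling trimmed to the essentials:

 /* Minimal sketch of the demuxer-opening sequence performed on the
  * input named on the command line: open the container, probe the
  * streams, and pick the "best" audio and video streams. */
 #include <libavformat/avformat.h>

 static int open_best_streams(const char *filename)
 {
     AVFormatContext *ic = NULL;
     int video_idx, audio_idx, ret;

     av_register_all();
     if ((ret = avformat_open_input(&ic, filename, NULL, NULL)) < 0)
         return ret;
     if ((ret = avformat_find_stream_info(ic, NULL)) < 0) {
         avformat_close_input(&ic);
         return ret;
     }
     video_idx = av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
     audio_idx = av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
     av_dump_format(ic, 0, filename, 0);
     /* a real player would now open decoders for video_idx / audio_idx */
     avformat_close_input(&ic);
     return (video_idx >= 0 || audio_idx >= 0) ? 0 : AVERROR_STREAM_NOT_FOUND;
 }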
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1412
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:90
AVFilterContext ** filters
Definition: avfilter.h:1173
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:476
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3631
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:721
static void video_image_display(VideoState *is)
Definition: ffplay.c:1096
#define NULL
Definition: coverity.c:32
Clock audclk
Definition: ffplay.c:213
const char const char void * val
Definition: avisynth_c.h:672
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:431
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:452
float v
const char * s
Definition: avisynth_c.h:669
int width
Definition: ffplay.c:284
#define OPT_EXPERT
Definition: cmdutils.h:163
#define CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:763
static double get_clock(Clock *c)
Definition: ffplay.c:1393
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:281
enum AVSampleFormat fmt
Definition: ffplay.c:131
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3582
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:65
SDL_cond * cond
Definition: ffplay.c:171
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3059
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2573
int linesize[AV_NUM_DATA_POINTERS]
number of bytes per line
Definition: avcodec.h:3441
void av_free_packet(AVPacket *pkt)
Free a packet.
Definition: avpacket.c:280
FrameQueue pictq
Definition: ffplay.c:217
static int video_open(VideoState *is, int force_set_video_mode, Frame *vp)
Definition: ffplay.c:1342
Decoder auddec
Definition: ffplay.c:221
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:88
AVStream * subtitle_st
Definition: ffplay.c:267
This structure describes decoded (raw) audio or video data.
Definition: frame.h:163
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:122
static double rint(double x)
Definition: libm.h:141
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3469
#define SWS_BICUBIC
Definition: swscale.h:58
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1461
double rdftspeed
Definition: ffplay.c:335
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
double frame_timer
Definition: ffplay.c:270
static AVInputFormat * file_iformat
Definition: ffplay.c:302
#define OPT_VIDEO
Definition: cmdutils.h:165
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3640
int av_lockmgr_register(int(*cb)(void **mutex, enum AVLockOp op))
Register a user provided lock manager supporting the operations specified by AVLockOp.
Definition: utils.c:3565
struct AudioParams audio_filter_src
Definition: ffplay.c:247
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3588
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:76
Unlock the mutex.
Definition: avcodec.h:5226
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:181
static const AVFilterPad outputs[]
Definition: af_ashowinfo.c:246
AVRational next_pts_tb
Definition: ffplay.c:193
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1441
Main libavfilter public API header.
int rindex
Definition: ffplay.c:164
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:80
int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:401
#define AV_DICT_DONT_OVERWRITE
Don't overwrite existing entries.
Definition: dict.h:81
const char * g
Definition: vf_curves.c:108
static int default_height
Definition: ffplay.c:308
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:272
FrameQueue sampq
Definition: ffplay.c:219
enum VideoState::ShowMode show_mode
AVFilterGraph * agraph
Definition: ffplay.c:293
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:184
int seek_flags
Definition: ffplay.c:206
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1360
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:748
int serial
Definition: ffplay.c:117
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4174
static int64_t cur_time
Definition: ffserver.c:253
#define OPT_AUDIO
Definition: cmdutils.h:166
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
Definition: ffplay.c:1957
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3237
int num
numerator
Definition: rational.h:44
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3598
int nb_colors
number of colors in pict, undefined when pict is not set
Definition: avcodec.h:3473
int size
Definition: avcodec.h:1161
const char * b
Definition: vf_curves.c:109
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1505
MyAVPacketList * first_pkt
Definition: ffplay.c:113
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1491
static int seek_by_bytes
Definition: ffplay.c:315
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> in
double audio_diff_cum
Definition: ffplay.c:231
static void packet_queue_init(PacketQueue *q)
Definition: ffplay.c:444
Various defines for YUV<->RGB conversion.
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1621
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:96
AVInputFormat * iformat
Definition: ffplay.c:199
enum AVMediaType codec_type
Definition: rtp.c:37
AVCodecContext * avctx
Definition: ffplay.c:185
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1115
int paused
Definition: ffplay.c:202
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3653
static AVStream * video_stream
static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph, AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
Definition: ffplay.c:1914
int abort_request
Definition: ffplay.c:116
unsigned num_rects
Definition: avcodec.h:3498
#define a1
Definition: regdef.h:47
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1405
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1518
SDL_Rect last_display_rect
Definition: ffplay.c:280
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:40
double audio_diff_threshold
Definition: ffplay.c:233
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everythnig contained in src to dst and reset src.
Definition: frame.c:394
#define FF_ARRAY_ELEMS(a)
av_dlog(ac->avr,"%d samples - audio_convert: %s to %s (%s)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt), use_generic?ac->func_descr_generic:ac->func_descr)
uint8_t silence_buf[SDL_AUDIO_MIN_BUFFER_SIZE]
Definition: ffplay.c:238
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:76
discard all
Definition: avcodec.h:667
int64_t channel_layout
Definition: ffplay.c:130
static AVPacket pkt
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:90
static int audio_disable
Definition: ffplay.c:311
AVStream * audio_st
Definition: ffplay.c:235
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: utils.c:2713
static const char * audio_codec_name
Definition: ffplay.c:332
int av_dup_packet(AVPacket *pkt)
Definition: avpacket.c:248
Picture data structure.
Definition: avcodec.h:3439
int serial
Definition: ffplay.c:150
AVCodec.
Definition: avcodec.h:3173
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3361
double pts_drift
Definition: ffplay.c:138
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:1979
AVLockOp
Lock operation used by lockmgr.
Definition: avcodec.h:5223
int width
Definition: ffplay.c:157
#define INSERT_FILT(name, arg)
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:1178
AVStream * video_st
Definition: ffplay.c:274
Clock extclk
Definition: ffplay.c:215
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3197
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1367
void * opaque
Definition: avio.h:53
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:3499
void av_picture_copy(AVPicture *dst, const AVPicture *src, enum AVPixelFormat pix_fmt, int width, int height)
Copy image src to dst.
Definition: avpicture.c:72
Format I/O context.
Definition: avformat.h:1214
static int64_t sws_flags
Definition: ffplay.c:104
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3328
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:4197
Definition: ffplay.c:147
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:256
int av_sync_type
Definition: ffplay.c:227
unsigned int nb_stream_indexes
Definition: avformat.h:1154
#define AV_LOG_QUIET
Print no output.
Definition: log.h:157
int rindex_shown
Definition: ffplay.c:169
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3471
AVFilterLink ** inputs
array of pointers to input links
Definition: avfilter.h:641
double pts
Definition: ffplay.c:151
static AVFilter ** last_filter
Definition: avfilter.c:482
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:232
AVRational start_pts_tb
Definition: ffplay.c:191
static double cb(void *priv, double x, double y)
Definition: vf_geq.c:96
static int read_thread(void *arg)
Definition: ffplay.c:2867
int keep_last
Definition: ffplay.c:168
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:131
int rdft_bits
Definition: ffplay.c:261
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:820
int size
Definition: ffplay.c:115
static const char ** vfilters_list
Definition: ffplay.c:339
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:674
static int64_t start_time
Definition: ffplay.c:319
if()
Definition: avfilter.c:975
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1991
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:93
Lock the mutex.
Definition: avcodec.h:5225
uint8_t
static int nb_streams
Definition: ffprobe.c:216
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:135
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:519
static int default_width
Definition: ffplay.c:307
int last_video_stream
Definition: ffplay.c:296
int last_subtitle_stream
Definition: ffplay.c:296
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:595
#define HAS_ARG
Definition: cmdutils.h:161
int audio_hw_buf_size
Definition: ffplay.c:237
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:2786
static av_always_inline av_const int isnan(float x)
Definition: libm.h:96
uint8_t * data[AV_NUM_DATA_POINTERS]
pointers to the image data planes
Definition: avcodec.h:3440
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2844
struct SwrContext * swr_ctx
Definition: ffplay.c:250
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:67
int finished
Definition: ffplay.c:187
libavcodec/libavfilter gluing utilities
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3389
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:366
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:4218
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:249
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:469
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1326
static int framedrop
Definition: ffplay.c:329
static void alloc_picture(VideoState *is)
Definition: ffplay.c:1736
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:72
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1282
AVPacket pkt
Definition: ffplay.c:107
int bytes_per_sec
Definition: ffplay.c:133
void avfilter_register_all(void)
Initialize the filter system.
Definition: allfilters.c:40
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:787
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:107
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
void av_codec_set_lowres(AVCodecContext *avctx, int val)
static int64_t audio_callback_time
Definition: ffplay.c:347
static double av_q2d(AVRational a)
Convert rational to double.
Definition: rational.h:80
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:39
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:377
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1325
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:496
static void sigterm_handler(int sig)
Definition: ffplay.c:1329
uint8_t * data
Definition: avcodec.h:1160
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:369
int freq
Definition: ffplay.c:128
int avformat_network_init(void)
Do global initialization of network components.
Definition: utils.c:4109
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:163
static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
Definition: ffplay.c:2046
static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:357
Definition: mxfdec.c:255
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:191
Definition: ffplay.c:136
static void copy(LZOContext *c, int cnt)
Copies bytes from input to output buffer with checking.
Definition: lzo.c:85
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:129
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the '-loglevel' option in the command line args and apply it.
Definition: cmdutils.c:483
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:779
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:82
ptrdiff_t size
Definition: opengl_enc.c:101
int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame)
Accessors for some AVFrame fields.
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3472
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:273
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:162
static int64_t duration
Definition: ffplay.c:320
AVRational sar
Definition: ffplay.c:159
AVPacket pkt_temp
Definition: ffplay.c:183
unsigned int * stream_index
Definition: avformat.h:1153
#define av_log(a,...)
static void duplicate_right_border_pixels(SDL_Overlay *bmp)
Definition: ffplay.c:1767
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:285
PacketQueue videoq
Definition: ffplay.c:275
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2036
AVDictionary * format_opts
Definition: cmdutils.c:66
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:301
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:100
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:140
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:428
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:3483
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:2826
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3187
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:3466
int audio_diff_avg_count
Definition: ffplay.c:234
const AVS_VideoInfo * vi
Definition: avisynth_c.h:696
int ytop
Definition: ffplay.c:284
int width
width and height of the video frame
Definition: frame.h:212
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:175
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:71
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1423
int seek_req
Definition: ffplay.c:205
int(* callback)(void *)
Definition: avio.h:52
int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: utils.c:2336
Create a mutex.
Definition: avcodec.h:5224
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:125
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1581
int read_pause_return
Definition: ffplay.c:209
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:427
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:302
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3470
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:731
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:148
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:68
RDFTContext * rdft
Definition: ffplay.c:260
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:755
const char * r
Definition: vf_curves.c:107
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:433
static int autorotate
Definition: ffplay.c:343
int capabilities
Codec capabilities.
Definition: avcodec.h:3192
#define RGBA_IN(r, g, b, a, s)
Definition: ffplay.c:831
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:196
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:102
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:3549
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1532
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:194
const char * arg
Definition: jacosubdec.c:66
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1333
int reallocate
Definition: ffplay.c:156
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:490
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:387
AVChapter ** chapters
Definition: avformat.h:1413
#define wrap(func)
Definition: neontest.h:62
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:333
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1291
int video_stream
Definition: ffplay.c:273
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:47
int * queue_serial
Definition: ffplay.c:143
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1425
int xpos
Definition: ffplay.c:263
int channels
Definition: ffplay.c:129
static enum ShowMode show_mode
Definition: ffplay.c:331
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1149
#define FFMAX(a, b)
Definition: common.h:64
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:487
static const OptionDef options[]
Definition: ffplay.c:3677
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:126
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3675
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define BPP
Definition: ffplay.c:855
double audio_clock
Definition: ffplay.c:229
static const int sample_rates[]
Definition: dcaenc.h:32
int force_refresh
Definition: ffplay.c:201
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2044
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare 2 timestamps each in its own timebases.
Definition: mathematics.c:145
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3604
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:1980
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2362
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3497
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:627
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3500
static int genpts
Definition: ffplay.c:322
AVCodecContext * codec
Codec context associated with this stream.
Definition: avformat.h:814
static AVPacket flush_pkt
Definition: ffplay.c:349
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:419
double frame_last_returned_time
Definition: ffplay.c:271
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: avfilter.c:487
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
static const char * subtitle_codec_name
Definition: ffplay.c:333
static int subtitle_disable
Definition: ffplay.c:313
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:126
int max_size
Definition: ffplay.c:167
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1270
int step
Definition: ffplay.c:285
SDL_Thread * decoder_tid
Definition: ffplay.c:194
static SDL_Surface * screen
Definition: ffplay.c:354
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:64
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:3558
SDL_mutex * mutex
Definition: ffplay.c:118
int audio_write_buf_size
Definition: ffplay.c:244
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:160
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples, int is_silence, int64_t nb_samples_notify, AVRational time_base)
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:124
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:123
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:234
struct MyAVPacketList * next
Definition: ffplay.c:108
#define AV_CH_LAYOUT_STEREO_DOWNMIX
char filename[1024]
input or output filename
Definition: avformat.h:1290
AVPicture pict
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3479
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:247
#define FFMIN(a, b)
Definition: common.h:66
SDL_mutex * mutex
Definition: ffplay.c:170
float y
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:383
int windex
Definition: ffplay.c:165
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:602
static int cursor_hidden
Definition: ffplay.c:337
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:533
ret
Definition: avfilter.c:974
AVSubtitle sub
Definition: ffplay.c:149
static int lockmgr(void **mtx, enum AVLockOp op)
Definition: ffplay.c:3762
int width
picture width / height.
Definition: avcodec.h:1412
int main(int argc, char **argv)
Definition: ffplay.c:3782
int height
Definition: ffplay.c:158
static void show_usage(void)
Definition: ffplay.c:3724
int nb_packets
Definition: ffplay.c:114
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3576
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1880
int frame_drops_late
Definition: ffplay.c:252
struct AudioParams audio_src
Definition: ffplay.c:245
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3344
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1419
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:73
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:321
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:1908
int last_i_start
Definition: ffplay.c:259
uint16_t format
Definition: avcodec.h:3495
char filename[1024]
Definition: ffplay.c:283
int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: utils.c:2482
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
Definition: ffplay.c:857
#define OPT_INT64
Definition: cmdutils.h:170
MyAVPacketList * last_pkt
Definition: ffplay.c:113
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1524
float u
int n
Definition: avisynth_c.h:589
AVDictionary * metadata
Definition: avformat.h:869
static int frame_queue_prev(FrameQueue *f)
Definition: ffplay.c:747
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2430
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:346
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:78
static void fill_border(int xleft, int ytop, int width, int height, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:793
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:71
static int decoder_reorder_pts
Definition: ffplay.c:324
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:93
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1413
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:257
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1354
int paused
Definition: ffplay.c:142
static const char * input_filename
Definition: ffplay.c:303
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:772
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:684
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3731
int av_codec_get_max_lowres(const AVCodec *codec)
Definition: utils.c:1279
int64_t pos
Definition: ffplay.c:153
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:296
Stream structure.
Definition: avformat.h:795
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: utils.c:3206
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1176
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1349
static int fs_screen_width
Definition: ffplay.c:305
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:85
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:224
int av_opt_get_int(void *obj, const char *name, int search_flags, int64_t *out_val)
Definition: opt.c:822
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4122
static int screen_height
Definition: ffplay.c:310
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3625
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:204
#define AV_LOG_INFO
Standard information.
Definition: log.h:186
int64_t next_pts
Definition: ffplay.c:192
static int autoexit
Definition: ffplay.c:325
AVFrame * frame
Definition: ffplay.c:148
int serial
Definition: ffplay.c:141
enum AVMediaType codec_type
Definition: avcodec.h:1247
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:673
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:1065
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:59
enum AVCodecID codec_id
Definition: avcodec.h:1256
static void do_exit(VideoState *is)
Definition: ffplay.c:1311
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:253
char * av_strdup(const char *s)
Duplicate the string s.
Definition: mem.c:265
int sample_rate
samples per second
Definition: avcodec.h:1983
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:191
AVIOContext * pb
I/O context.
Definition: avformat.h:1256
static int loop
Definition: ffplay.c:328
int last_paused
Definition: ffplay.c:203
static int exit_on_keydown
Definition: ffplay.c:326
FFT functions.
AVFilterContext * in_video_filter
Definition: ffplay.c:289
main external API structure.
Definition: avcodec.h:1239
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: utils.c:2934
Decoder subdec
Definition: ffplay.c:223
int av_copy_packet(AVPacket *dst, const AVPacket *src)
Copy packet, including contents.
Definition: avpacket.c:265
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:139
double max_frame_duration
Definition: ffplay.c:276
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:2807
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:244
Clock vidclk
Definition: ffplay.c:214
int x
Definition: f_ebur128.c:90
static void decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2210
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don't need to export the SwsContext.
Definition: swscale.c:888
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:432
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1562
GLint GLenum type
Definition: opengl_enc.c:105
static const char * window_title
Definition: ffplay.c:304
double pts
Definition: ffplay.c:137
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:679
static char * afilters
Definition: ffplay.c:341
static int audio_thread(void *arg)
Definition: ffplay.c:2123
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:69
static int av_sync_type
Definition: ffplay.c:318
int pkt_serial
Definition: ffplay.c:186
BYTE int const BYTE int int int height
Definition: avisynth_c.h:714
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:667
int sample_rate
Sample rate of the audio data.
Definition: frame.h:414
static void free_picture(Frame *vp)
Definition: ffplay.c:1057
int av_frame_get_channels(const AVFrame *frame)
Definition: f_ebur128.c:90
static const AVFilterPad inputs[]
Definition: af_ashowinfo.c:237
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1357
PacketQueue audioq
Definition: ffplay.c:236
int packet_pending
Definition: ffplay.c:188
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:117
int64_t seek_pos
Definition: ffplay.c:207
rational number numerator/denominator
Definition: rational.h:43
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:76
int allocated
Definition: ffplay.c:155
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:286
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:150
#define OPT_STRING
Definition: cmdutils.h:164
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1147
SDL_cond * cond
Definition: ffplay.c:119
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:89
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2189
struct SwsContext * sws_opts
Definition: cmdutils.c:64
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:541
AVMediaType
Definition: avutil.h:192
discard useless packets like 0 size packets in avi
Definition: avcodec.h:662
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2850
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1476
AVFilterContext * out_video_filter
Definition: ffplay.c:290
int queue_attachments_req
Definition: ffplay.c:204
unsigned nb_filters
Definition: avfilter.h:1175
AVFilterContext * in_audio_filter
Definition: ffplay.c:291
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:1325
#define snprintf
Definition: snprintf.h:34
int vfilter_idx
Definition: ffplay.c:288
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:631
int error
contains the error code or 0 if no error happened
Definition: avio.h:102
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg[SWR_CH_MAX], int in_count)
Definition: swresample.c:667
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:189
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:86
#define FF_ALLOC_EVENT
Definition: ffplay.c:351
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1469
int audio_stream
Definition: ffplay.c:225
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2154
int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:254
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:129
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2647
char * name
unique name for this input/output in the list
Definition: avfilter.h:1351
static int64_t cursor_last_shown
Definition: ffplay.c:336
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:637
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3570
#define RGB_TO_U_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:103
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:377
static int64_t pts
Global timestamp for the audio frames.
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:72
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: utils.c:2939
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1785
static int flags
Definition: cpu.c:47
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1299
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:174
int frame_drops_early
Definition: ffplay.c:251
static double lum(void *priv, double x, double y)
Definition: vf_geq.c:95
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:104
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2382
#define RGB_TO_V_CCIR(r1, g1, b1, shift)
Definition: colorspace.h:107
int sample_array_index
Definition: ffplay.c:258
SDL_cond * continue_read_thread
Definition: ffplay.c:298
int64_t start
Definition: avformat.h:1182
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:654
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> out
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:761
#define OPT_BOOL
Definition: cmdutils.h:162
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:261
double speed
Definition: ffplay.c:140
static int exit_on_mousedown
Definition: ffplay.c:327
static int op(uint8_t **dst, const uint8_t *dst_end, GetByteContext *gb, int pixel, int count, int *x, int width, int linesize)
Perform decode operation.
Definition: anm.c:78
#define CODEC_FLAG_EMU_EDGE
Definition: avcodec.h:742
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1017
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:513
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
#define YUVA_OUT(d, y, u, v, a)
Definition: ffplay.c:849
static int video_thread(void *arg)
Definition: ffplay.c:2216
#define OPT_INT
Definition: cmdutils.h:167
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:179
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1334
AVDictionary * codec_opts
Definition: cmdutils.c:66
struct AudioParams audio_tgt
Definition: ffplay.c:249
#define ALPHA_BLEND(a, oldp, newp, s)
Definition: ffplay.c:828
AVRational av_codec_get_pkt_timebase(const AVCodecContext *avctx)
Free mutex resources.
Definition: avcodec.h:5227
uint8_t * audio_buf
Definition: ffplay.c:239
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:73
static int display_disable
Definition: ffplay.c:316
static int video_disable
Definition: ffplay.c:312
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3015
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:847
signed 16 bits
Definition: samplefmt.h:62
int audio_buf_index
Definition: ffplay.c:243
uint8_t * audio_buf1
Definition: ffplay.c:240
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3619
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:139
static int screen_width
Definition: ffplay.c:309
PacketQueue * pktq
Definition: ffplay.c:172
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:858
uint32_t start_display_time
Definition: avcodec.h:3496
FFTSample * rdft_data
Definition: ffplay.c:262
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1574
int audio_clock_serial
Definition: ffplay.c:230
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1181
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:49
char * key
Definition: dict.h:87
int den
denominator
Definition: rational.h:45
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:80
PacketQueue subtitleq
Definition: ffplay.c:268
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1226
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:3631
static int lowres
Definition: ffplay.c:323
int eof
Definition: ffplay.c:281
#define RGB_TO_Y_CCIR(r, g, b)
Definition: colorspace.h:99
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:579
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:330
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:433
double duration
Definition: ffplay.c:152
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:88
int eof_reached
true if eof reached
Definition: avio.h:96
#define NAN
Definition: math.h:28
int len
int channels
number of audio channels
Definition: avcodec.h:1984
#define av_log2
Definition: intmath.h:105
int64_t av_frame_get_pkt_pos(const AVFrame *frame)
unsigned int audio_buf1_size
Definition: ffplay.c:242
SDL_Thread * read_tid
Definition: ffplay.c:198
AVPacket pkt
Definition: ffplay.c:182
int frame_size
Definition: ffplay.c:132
void av_log_set_flags(int arg)
Definition: log.c:373
int64_t start_pts
Definition: ffplay.c:190
int abort_request
Definition: ffplay.c:200
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:770
AVFilterContext * out_audio_filter
Definition: ffplay.c:292
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:415
double last_updated
Definition: ffplay.c:139
Decoder viddec
Definition: ffplay.c:222
AVDictionary * swr_opts
Definition: cmdutils.c:65
int height
Definition: ffplay.c:284
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:191
int flags2
CODEC_FLAG2_*.
Definition: avcodec.h:1340
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:527
An instance of a filter.
Definition: avfilter.h:633
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1159
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1316
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1309
int height
Definition: frame.h:212
static const char * video_codec_name
Definition: ffplay.c:334
#define MAX_QUEUE_SIZE
Definition: ffplay.c:67
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3058
PacketQueue * queue
Definition: ffplay.c:184
const char * name
A comma-separated list of short names for the format.
Definition: avformat.h:581
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:689
#define AV_DICT_IGNORE_SUFFIX
Return the first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:72
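Passing an empty key together with this flag is the usual way to walk every entry of a dictionary; a small sketch, assuming metadata is an existing AVDictionary*:
    AVDictionaryEntry *t = NULL;
    while ((t = av_dict_get(metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        av_log(NULL, AV_LOG_INFO, "%s=%s\n", t->key, t->value);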
static int subtitle_thread(void *arg)
Definition: ffplay.c:2316
FrameQueue subpq
Definition: ffplay.c:218
static int nb_vfilters
Definition: ffplay.c:340
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:169
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1458
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:166
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:300
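A hedged sketch of the usual demux-loop end-of-file check (ic is assumed to be an open AVFormatContext* and pkt an AVPacket):
    int ret = av_read_frame(ic, &pkt);
    if (ret < 0 && (ret == AVERROR_EOF || avio_feof(ic->pb))) {
        /* no more data will arrive; flush the decoders */
    }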
#define FF_QUIT_EVENT
Definition: ffplay.c:352
int xleft
Definition: ffplay.c:284
#define FFSWAP(type, a, b)
Definition: common.h:69
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:1950
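For example, logging the pixel format a decoder produced (avctx is an assumed AVCodecContext*):
    av_log(NULL, AV_LOG_INFO, "pixel format: %s\n",
           av_get_pix_fmt_name(avctx->pix_fmt));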
int stream_index
Definition: avcodec.h:1162
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:837
static double cr(void *priv, double x, double y)
Definition: vf_geq.c:97
int subtitle_stream
Definition: ffplay.c:266
unsigned int audio_buf_size
Definition: ffplay.c:241
int64_t seek_rel
Definition: ffplay.c:208
int realtime
Definition: ffplay.c:211
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:207
#define YUVA_IN(y, u, v, a, s, pal)
Definition: ffplay.c:840
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:860
static void video_display(VideoState *is)
Definition: ffplay.c:1383
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:314
SDL_Overlay * bmp
Definition: ffplay.c:154
static int show_status
Definition: ffplay.c:317
static int compute_mod(int a, int b)
Definition: ffplay.c:1142
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
Definition: avcodec.h:1137
void av_register_all(void)
Initialize libavformat and register all the muxers, demuxers and protocols.
Definition: allformats.c:51
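Typical one-time initialization at program start; avformat_network_init() is optional and only needed when network protocols are used:
    av_register_all();
    avformat_network_init();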
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:368
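Options are addressed by name on any AVOptions-enabled structure; a small sketch, assuming swr is an allocated SwrContext* (string values are parsed according to the option type):
    av_opt_set    (swr, "out_sample_fmt",  "s16",  0);
    av_opt_set_int(swr, "out_sample_rate", 44100,  0);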
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2536
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:217
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
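Because the returned block is zero-filled, it is convenient for structs whose fields must start out cleared; MyState is a hypothetical type used only for illustration:
    MyState *s = av_mallocz(sizeof(*s));
    if (!s)
        return AVERROR(ENOMEM);
    /* ... */
    av_freep(&s);   /* free and reset the pointer to NULL */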
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1433
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3316
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1153
static int fs_screen_height
Definition: ffplay.c:306
double last_vis_time
Definition: ffplay.c:264
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
Definition: avformat.h:887
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:241
#define av_unused
Definition: attributes.h:118
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:154
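A hedged resampler setup sketch; in_layout, in_fmt and in_rate are placeholders describing the decoded audio, and the output parameters are only an example:
    SwrContext *swr = swr_alloc_set_opts(NULL,
            AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,  /* output */
            in_layout, in_fmt, in_rate,                     /* input  */
            0, NULL);
    if (!swr || swr_init(swr) < 0) {
        /* handle setup failure */
    }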
AVFormatContext * ic
Definition: ffplay.c:210
static int width
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:705
static int16_t block[64]
Definition: dct-test.c:110