ffplay.c

00001 /*
00002  * Copyright (c) 2003 Fabrice Bellard
00003  *
00004  * This file is part of FFmpeg.
00005  *
00006  * FFmpeg is free software; you can redistribute it and/or
00007  * modify it under the terms of the GNU Lesser General Public
00008  * License as published by the Free Software Foundation; either
00009  * version 2.1 of the License, or (at your option) any later version.
00010  *
00011  * FFmpeg is distributed in the hope that it will be useful,
00012  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00013  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00014  * Lesser General Public License for more details.
00015  *
00016  * You should have received a copy of the GNU Lesser General Public
00017  * License along with FFmpeg; if not, write to the Free Software
00018  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00019  */
00020 
00026 #include "config.h"
00027 #include <inttypes.h>
00028 #include <math.h>
00029 #include <limits.h>
00030 #include <signal.h>
00031 #include "libavutil/avstring.h"
00032 #include "libavutil/colorspace.h"
00033 #include "libavutil/mathematics.h"
00034 #include "libavutil/pixdesc.h"
00035 #include "libavutil/imgutils.h"
00036 #include "libavutil/dict.h"
00037 #include "libavutil/parseutils.h"
00038 #include "libavutil/samplefmt.h"
00039 #include "libavutil/avassert.h"
00040 #include "libavformat/avformat.h"
00041 #include "libavdevice/avdevice.h"
00042 #include "libswscale/swscale.h"
00043 #include "libavcodec/audioconvert.h"
00044 #include "libavutil/opt.h"
00045 #include "libavcodec/avfft.h"
00046 #include "libswresample/swresample.h"
00047 
00048 #if CONFIG_AVFILTER
00049 # include "libavfilter/avcodec.h"
00050 # include "libavfilter/avfilter.h"
00051 # include "libavfilter/avfiltergraph.h"
00052 # include "libavfilter/buffersink.h"
00053 #endif
00054 
00055 #include <SDL.h>
00056 #include <SDL_thread.h>
00057 
00058 #include "cmdutils.h"
00059 
00060 #include <unistd.h>
00061 #include <assert.h>
00062 
00063 const char program_name[] = "ffplay";
00064 const int program_birth_year = 2003;
00065 
00066 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
00067 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
00068 #define MIN_FRAMES 5
00069 
00070 /* SDL audio buffer size, in samples. Should be small to have precise
00071    A/V sync as SDL does not have hardware buffer fullness info. */
00072 #define SDL_AUDIO_BUFFER_SIZE 1024
00073 
00074 /* no AV sync correction is done if below the AV sync threshold */
00075 #define AV_SYNC_THRESHOLD 0.01
00077 /* no AV correction is done if the error is too big */
00077 #define AV_NOSYNC_THRESHOLD 10.0
00078 
00079 /* maximum audio speed change to get correct sync */
00080 #define SAMPLE_CORRECTION_PERCENT_MAX 10
00081 
00082 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
00083 #define AUDIO_DIFF_AVG_NB   20
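/* Roughly how these thresholds interact: playback normally follows the audio
 * clock, and a video frame whose clock differs from the master by less than
 * AV_SYNC_THRESHOLD (10 ms) is shown as scheduled; between AV_SYNC_THRESHOLD
 * and AV_NOSYNC_THRESHOLD the frame delay is dropped to zero or doubled (see
 * compute_target_delay() below), and past AV_NOSYNC_THRESHOLD the clocks are
 * assumed to have diverged and no correction is attempted. AUDIO_DIFF_AVG_NB
 * sets how many A-V differences feed the running average used when the audio
 * itself is adjusted, capped by SAMPLE_CORRECTION_PERCENT_MAX. */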
00084 
00085 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
00086 #define SAMPLE_ARRAY_SIZE (2 * 65536)
00087 
00088 static int sws_flags = SWS_BICUBIC;
00089 
00090 typedef struct PacketQueue {
00091     AVPacketList *first_pkt, *last_pkt;
00092     int nb_packets;
00093     int size;
00094     int abort_request;
00095     SDL_mutex *mutex;
00096     SDL_cond *cond;
00097 } PacketQueue;
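/* PacketQueue is a mutex/condition protected FIFO of demuxed AVPackets shared
 * between the read (producer) thread and the decoder (consumer) threads; size
 * tracks the queued bytes so the reader can stop once MAX_QUEUE_SIZE is
 * buffered, and abort_request lets stream shutdown wake any blocked consumer. */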
00098 
00099 #define VIDEO_PICTURE_QUEUE_SIZE 2
00100 #define SUBPICTURE_QUEUE_SIZE 4
00101 
00102 typedef struct VideoPicture {
00103     double pts;                                  /* presentation timestamp for this picture */
00104     double duration;                             /* expected duration of the frame */
00105     int64_t pos;                                  /* byte position in the file */
00106     int skip;
00107     SDL_Overlay *bmp;
00108     int width, height; /* source height & width */
00109     int allocated;
00110     int reallocate;
00111     enum PixelFormat pix_fmt;
00112 
00113 #if CONFIG_AVFILTER
00114     AVFilterBufferRef *picref;
00115 #endif
00116 } VideoPicture;
00117 
00118 typedef struct SubPicture {
00119     double pts; /* presentation time stamp for this picture */
00120     AVSubtitle sub;
00121 } SubPicture;
00122 
00123 enum {
00124     AV_SYNC_AUDIO_MASTER, /* default choice */
00125     AV_SYNC_VIDEO_MASTER,
00126     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
00127 };
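/* The sync master decides which clock the others are corrected against:
 * AV_SYNC_AUDIO_MASTER (the default) schedules video frames against the audio
 * clock, AV_SYNC_VIDEO_MASTER would adjust audio to the video clock, and
 * AV_SYNC_EXTERNAL_CLOCK makes both follow a free-running wall clock.
 * get_master_clock() below picks the clock accordingly, falling back to
 * whichever stream is actually present. */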
00128 
00129 typedef struct VideoState {
00130     SDL_Thread *read_tid;
00131     SDL_Thread *video_tid;
00132     SDL_Thread *refresh_tid;
00133     AVInputFormat *iformat;
00134     int no_background;
00135     int abort_request;
00136     int paused;
00137     int last_paused;
00138     int seek_req;
00139     int seek_flags;
00140     int64_t seek_pos;
00141     int64_t seek_rel;
00142     int read_pause_return;
00143     AVFormatContext *ic;
00144 
00145     int audio_stream;
00146 
00147     int av_sync_type;
00148     double external_clock; /* external clock base */
00149     int64_t external_clock_time;
00150 
00151     double audio_clock;
00152     double audio_diff_cum; /* used for AV difference average computation */
00153     double audio_diff_avg_coef;
00154     double audio_diff_threshold;
00155     int audio_diff_avg_count;
00156     AVStream *audio_st;
00157     PacketQueue audioq;
00158     int audio_hw_buf_size;
00159     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[AVCODEC_MAX_AUDIO_FRAME_SIZE * 4];
00160     uint8_t silence_buf[SDL_AUDIO_BUFFER_SIZE];
00161     uint8_t *audio_buf;
00162     uint8_t *audio_buf1;
00163     unsigned int audio_buf_size; /* in bytes */
00164     int audio_buf_index; /* in bytes */
00165     int audio_write_buf_size;
00166     AVPacket audio_pkt_temp;
00167     AVPacket audio_pkt;
00168     enum AVSampleFormat audio_src_fmt;
00169     enum AVSampleFormat audio_tgt_fmt;
00170     int audio_src_channels;
00171     int audio_tgt_channels;
00172     int64_t audio_src_channel_layout;
00173     int64_t audio_tgt_channel_layout;
00174     int audio_src_freq;
00175     int audio_tgt_freq;
00176     struct SwrContext *swr_ctx;
00177     double audio_current_pts;
00178     double audio_current_pts_drift;
00179     int frame_drops_early;
00180     int frame_drops_late;
00181     AVFrame *frame;
00182 
00183     enum ShowMode {
00184         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
00185     } show_mode;
00186     int16_t sample_array[SAMPLE_ARRAY_SIZE];
00187     int sample_array_index;
00188     int last_i_start;
00189     RDFTContext *rdft;
00190     int rdft_bits;
00191     FFTSample *rdft_data;
00192     int xpos;
00193 
00194     SDL_Thread *subtitle_tid;
00195     int subtitle_stream;
00196     int subtitle_stream_changed;
00197     AVStream *subtitle_st;
00198     PacketQueue subtitleq;
00199     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
00200     int subpq_size, subpq_rindex, subpq_windex;
00201     SDL_mutex *subpq_mutex;
00202     SDL_cond *subpq_cond;
00203 
00204     double frame_timer;
00205     double frame_last_pts;
00206     double frame_last_duration;
00207     double frame_last_dropped_pts;
00208     double frame_last_returned_time;
00209     double frame_last_filter_delay;
00210     int64_t frame_last_dropped_pos;
00211     double video_clock;                          /* pts of last decoded frame / predicted pts of next decoded frame */
00212     int video_stream;
00213     AVStream *video_st;
00214     PacketQueue videoq;
00215     double video_current_pts;                    /* current displayed pts (different from video_clock if frame fifos are used) */
00216     double video_current_pts_drift;              /* video_current_pts - time (av_gettime) at which we updated video_current_pts */
00217     int64_t video_current_pos;                   /* current displayed file position */
00218     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
00219     int pictq_size, pictq_rindex, pictq_windex;
00220     SDL_mutex *pictq_mutex;
00221     SDL_cond *pictq_cond;
00222 #if !CONFIG_AVFILTER
00223     struct SwsContext *img_convert_ctx;
00224 #endif
00225 
00226     char filename[1024];
00227     int width, height, xleft, ytop;
00228     int step;
00229 
00230 #if CONFIG_AVFILTER
00231     AVFilterContext *out_video_filter;          /* the last filter in the video chain */
00232 #endif
00233 
00234     int refresh;
00235 } VideoState;
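/* VideoState holds the entire player state for one input file. In broad terms
 * the work is split across SDL threads: read_tid demuxes packets into audioq,
 * videoq and subtitleq; video_tid decodes video and queues VideoPicture entries
 * into the pictq ring; subtitle_tid fills subpq; refresh_tid periodically posts
 * FF_REFRESH_EVENT so the main event loop calls video_refresh(); and the SDL
 * audio callback drains audioq, converts samples if needed and updates the
 * audio clock. */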
00236 
00237 static int opt_help(const char *opt, const char *arg);
00238 
00239 /* options specified by the user */
00240 static AVInputFormat *file_iformat;
00241 static const char *input_filename;
00242 static const char *window_title;
00243 static int fs_screen_width;
00244 static int fs_screen_height;
00245 static int screen_width  = 0;
00246 static int screen_height = 0;
00247 static int audio_disable;
00248 static int video_disable;
00249 static int wanted_stream[AVMEDIA_TYPE_NB] = {
00250     [AVMEDIA_TYPE_AUDIO]    = -1,
00251     [AVMEDIA_TYPE_VIDEO]    = -1,
00252     [AVMEDIA_TYPE_SUBTITLE] = -1,
00253 };
00254 static int seek_by_bytes = -1;
00255 static int display_disable;
00256 static int show_status = 1;
00257 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
00258 static int64_t start_time = AV_NOPTS_VALUE;
00259 static int64_t duration = AV_NOPTS_VALUE;
00260 static int workaround_bugs = 1;
00261 static int fast = 0;
00262 static int genpts = 0;
00263 static int lowres = 0;
00264 static int idct = FF_IDCT_AUTO;
00265 static enum AVDiscard skip_frame       = AVDISCARD_DEFAULT;
00266 static enum AVDiscard skip_idct        = AVDISCARD_DEFAULT;
00267 static enum AVDiscard skip_loop_filter = AVDISCARD_DEFAULT;
00268 static int error_concealment = 3;
00269 static int decoder_reorder_pts = -1;
00270 static int autoexit;
00271 static int exit_on_keydown;
00272 static int exit_on_mousedown;
00273 static int loop = 1;
00274 static int framedrop = -1;
00275 static enum ShowMode show_mode = SHOW_MODE_NONE;
00276 static const char *audio_codec_name;
00277 static const char *subtitle_codec_name;
00278 static const char *video_codec_name;
00279 static int rdftspeed = 20;
00280 #if CONFIG_AVFILTER
00281 static char *vfilters = NULL;
00282 #endif
00283 
00284 /* current context */
00285 static int is_full_screen;
00286 static int64_t audio_callback_time;
00287 
00288 static AVPacket flush_pkt;
00289 
00290 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
00291 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
00292 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
00293 
00294 static SDL_Surface *screen;
00295 
00296 void av_noreturn exit_program(int ret)
00297 {
00298     exit(ret);
00299 }
00300 
00301 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00302 {
00303     AVPacketList *pkt1;
00304 
00305     /* duplicate the packet */
00306     if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
00307         return -1;
00308 
00309     pkt1 = av_malloc(sizeof(AVPacketList));
00310     if (!pkt1)
00311         return -1;
00312     pkt1->pkt = *pkt;
00313     pkt1->next = NULL;
00314 
00315 
00316     SDL_LockMutex(q->mutex);
00317 
00318     if (!q->last_pkt)
00319 
00320         q->first_pkt = pkt1;
00321     else
00322         q->last_pkt->next = pkt1;
00323     q->last_pkt = pkt1;
00324     q->nb_packets++;
00325     q->size += pkt1->pkt.size + sizeof(*pkt1);
00326     /* XXX: should duplicate packet data in DV case */
00327     SDL_CondSignal(q->cond);
00328 
00329     SDL_UnlockMutex(q->mutex);
00330     return 0;
00331 }
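/* Producer-side sketch (a simplified version of what read_thread() does later
 * in this file): packets from av_read_frame() are routed to the queue of the
 * stream they belong to, e.g.
 *
 *     AVPacket pkt;
 *     if (av_read_frame(ic, &pkt) >= 0) {
 *         if (pkt.stream_index == is->video_stream)
 *             packet_queue_put(&is->videoq, &pkt);
 *         else
 *             av_free_packet(&pkt);
 *     }
 *
 * The special flush_pkt is queued without duplication, so consumers can
 * recognise it by address and reset their codec state. */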
00332 
00333 /* packet queue handling */
00334 static void packet_queue_init(PacketQueue *q)
00335 {
00336     memset(q, 0, sizeof(PacketQueue));
00337     q->mutex = SDL_CreateMutex();
00338     q->cond = SDL_CreateCond();
00339     packet_queue_put(q, &flush_pkt);
00340 }
00341 
00342 static void packet_queue_flush(PacketQueue *q)
00343 {
00344     AVPacketList *pkt, *pkt1;
00345 
00346     SDL_LockMutex(q->mutex);
00347     for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00348         pkt1 = pkt->next;
00349         av_free_packet(&pkt->pkt);
00350         av_freep(&pkt);
00351     }
00352     q->last_pkt = NULL;
00353     q->first_pkt = NULL;
00354     q->nb_packets = 0;
00355     q->size = 0;
00356     SDL_UnlockMutex(q->mutex);
00357 }
00358 
00359 static void packet_queue_end(PacketQueue *q)
00360 {
00361     packet_queue_flush(q);
00362     SDL_DestroyMutex(q->mutex);
00363     SDL_DestroyCond(q->cond);
00364 }
00365 
00366 static void packet_queue_abort(PacketQueue *q)
00367 {
00368     SDL_LockMutex(q->mutex);
00369 
00370     q->abort_request = 1;
00371 
00372     SDL_CondSignal(q->cond);
00373 
00374     SDL_UnlockMutex(q->mutex);
00375 }
00376 
00377 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
00378 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00379 {
00380     AVPacketList *pkt1;
00381     int ret;
00382 
00383     SDL_LockMutex(q->mutex);
00384 
00385     for (;;) {
00386         if (q->abort_request) {
00387             ret = -1;
00388             break;
00389         }
00390 
00391         pkt1 = q->first_pkt;
00392         if (pkt1) {
00393             q->first_pkt = pkt1->next;
00394             if (!q->first_pkt)
00395                 q->last_pkt = NULL;
00396             q->nb_packets--;
00397             q->size -= pkt1->pkt.size + sizeof(*pkt1);
00398             *pkt = pkt1->pkt;
00399             av_free(pkt1);
00400             ret = 1;
00401             break;
00402         } else if (!block) {
00403             ret = 0;
00404             break;
00405         } else {
00406             SDL_CondWait(q->cond, q->mutex);
00407         }
00408     }
00409     SDL_UnlockMutex(q->mutex);
00410     return ret;
00411 }
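/* Consumer-side sketch: each decoder thread blocks on the queue and exits when
 * the queue is aborted, e.g.
 *
 *     AVPacket pkt;
 *     for (;;) {
 *         int got = packet_queue_get(&is->audioq, &pkt, 1); // blocks while empty
 *         if (got < 0)
 *             break;                  // aborted, thread should terminate
 *         // ... decode pkt, then av_free_packet(&pkt) ...
 *     }
 */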
00412 
00413 static inline void fill_rectangle(SDL_Surface *screen,
00414                                   int x, int y, int w, int h, int color)
00415 {
00416     SDL_Rect rect;
00417     rect.x = x;
00418     rect.y = y;
00419     rect.w = w;
00420     rect.h = h;
00421     SDL_FillRect(screen, &rect, color);
00422 }
00423 
00424 #define ALPHA_BLEND(a, oldp, newp, s)\
00425 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
00426 
00427 #define RGBA_IN(r, g, b, a, s)\
00428 {\
00429     unsigned int v = ((const uint32_t *)(s))[0];\
00430     a = (v >> 24) & 0xff;\
00431     r = (v >> 16) & 0xff;\
00432     g = (v >> 8) & 0xff;\
00433     b = v & 0xff;\
00434 }
00435 
00436 #define YUVA_IN(y, u, v, a, s, pal)\
00437 {\
00438     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
00439     a = (val >> 24) & 0xff;\
00440     y = (val >> 16) & 0xff;\
00441     u = (val >> 8) & 0xff;\
00442     v = val & 0xff;\
00443 }
00444 
00445 #define YUVA_OUT(d, y, u, v, a)\
00446 {\
00447     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
00448 }
00449 
00450 
00451 #define BPP 1
00452 
00453 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00454 {
00455     int wrap, wrap3, width2, skip2;
00456     int y, u, v, a, u1, v1, a1, w, h;
00457     uint8_t *lum, *cb, *cr;
00458     const uint8_t *p;
00459     const uint32_t *pal;
00460     int dstx, dsty, dstw, dsth;
00461 
00462     dstw = av_clip(rect->w, 0, imgw);
00463     dsth = av_clip(rect->h, 0, imgh);
00464     dstx = av_clip(rect->x, 0, imgw - dstw);
00465     dsty = av_clip(rect->y, 0, imgh - dsth);
00466     lum = dst->data[0] + dsty * dst->linesize[0];
00467     cb  = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00468     cr  = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00469 
00470     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00471     skip2 = dstx >> 1;
00472     wrap = dst->linesize[0];
00473     wrap3 = rect->pict.linesize[0];
00474     p = rect->pict.data[0];
00475     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
00476 
00477     if (dsty & 1) {
00478         lum += dstx;
00479         cb += skip2;
00480         cr += skip2;
00481 
00482         if (dstx & 1) {
00483             YUVA_IN(y, u, v, a, p, pal);
00484             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00485             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00486             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00487             cb++;
00488             cr++;
00489             lum++;
00490             p += BPP;
00491         }
00492         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00493             YUVA_IN(y, u, v, a, p, pal);
00494             u1 = u;
00495             v1 = v;
00496             a1 = a;
00497             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00498 
00499             YUVA_IN(y, u, v, a, p + BPP, pal);
00500             u1 += u;
00501             v1 += v;
00502             a1 += a;
00503             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00504             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00505             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00506             cb++;
00507             cr++;
00508             p += 2 * BPP;
00509             lum += 2;
00510         }
00511         if (w) {
00512             YUVA_IN(y, u, v, a, p, pal);
00513             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00514             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00515             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00516             p++;
00517             lum++;
00518         }
00519         p += wrap3 - dstw * BPP;
00520         lum += wrap - dstw - dstx;
00521         cb += dst->linesize[1] - width2 - skip2;
00522         cr += dst->linesize[2] - width2 - skip2;
00523     }
00524     for (h = dsth - (dsty & 1); h >= 2; h -= 2) {
00525         lum += dstx;
00526         cb += skip2;
00527         cr += skip2;
00528 
00529         if (dstx & 1) {
00530             YUVA_IN(y, u, v, a, p, pal);
00531             u1 = u;
00532             v1 = v;
00533             a1 = a;
00534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00535             p += wrap3;
00536             lum += wrap;
00537             YUVA_IN(y, u, v, a, p, pal);
00538             u1 += u;
00539             v1 += v;
00540             a1 += a;
00541             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00542             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00543             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00544             cb++;
00545             cr++;
00546             p += -wrap3 + BPP;
00547             lum += -wrap + 1;
00548         }
00549         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00550             YUVA_IN(y, u, v, a, p, pal);
00551             u1 = u;
00552             v1 = v;
00553             a1 = a;
00554             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00555 
00556             YUVA_IN(y, u, v, a, p + BPP, pal);
00557             u1 += u;
00558             v1 += v;
00559             a1 += a;
00560             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00561             p += wrap3;
00562             lum += wrap;
00563 
00564             YUVA_IN(y, u, v, a, p, pal);
00565             u1 += u;
00566             v1 += v;
00567             a1 += a;
00568             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00569 
00570             YUVA_IN(y, u, v, a, p + BPP, pal);
00571             u1 += u;
00572             v1 += v;
00573             a1 += a;
00574             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00575 
00576             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00577             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00578 
00579             cb++;
00580             cr++;
00581             p += -wrap3 + 2 * BPP;
00582             lum += -wrap + 2;
00583         }
00584         if (w) {
00585             YUVA_IN(y, u, v, a, p, pal);
00586             u1 = u;
00587             v1 = v;
00588             a1 = a;
00589             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00590             p += wrap3;
00591             lum += wrap;
00592             YUVA_IN(y, u, v, a, p, pal);
00593             u1 += u;
00594             v1 += v;
00595             a1 += a;
00596             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00597             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00598             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00599             cb++;
00600             cr++;
00601             p += -wrap3 + BPP;
00602             lum += -wrap + 1;
00603         }
00604         p += wrap3 + (wrap3 - dstw * BPP);
00605         lum += wrap + (wrap - dstw - dstx);
00606         cb += dst->linesize[1] - width2 - skip2;
00607         cr += dst->linesize[2] - width2 - skip2;
00608     }
00609     /* handle odd height */
00610     if (h) {
00611         lum += dstx;
00612         cb += skip2;
00613         cr += skip2;
00614 
00615         if (dstx & 1) {
00616             YUVA_IN(y, u, v, a, p, pal);
00617             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00618             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00619             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00620             cb++;
00621             cr++;
00622             lum++;
00623             p += BPP;
00624         }
00625         for (w = dstw - (dstx & 1); w >= 2; w -= 2) {
00626             YUVA_IN(y, u, v, a, p, pal);
00627             u1 = u;
00628             v1 = v;
00629             a1 = a;
00630             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00631 
00632             YUVA_IN(y, u, v, a, p + BPP, pal);
00633             u1 += u;
00634             v1 += v;
00635             a1 += a;
00636             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00637             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00638             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00639             cb++;
00640             cr++;
00641             p += 2 * BPP;
00642             lum += 2;
00643         }
00644         if (w) {
00645             YUVA_IN(y, u, v, a, p, pal);
00646             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00647             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00648             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00649         }
00650     }
00651 }
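/* blend_subrect() alpha-blends a palettised subtitle rectangle into a YUV420P
 * picture. Luma is blended per pixel, while the half-resolution chroma planes
 * accumulate the u/v/alpha of the (up to four) covering subtitle pixels into
 * u1/v1/a1 and blend them with the matching ALPHA_BLEND shift (s = 2 for four
 * samples, 1 for two, 0 for one); the a >> 2 in the single-sample cases keeps
 * the alpha weight proportional to the fraction of the chroma cell covered. */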
00652 
00653 static void free_subpicture(SubPicture *sp)
00654 {
00655     avsubtitle_free(&sp->sub);
00656 }
00657 
00658 static void video_image_display(VideoState *is)
00659 {
00660     VideoPicture *vp;
00661     SubPicture *sp;
00662     AVPicture pict;
00663     float aspect_ratio;
00664     int width, height, x, y;
00665     SDL_Rect rect;
00666     int i;
00667 
00668     vp = &is->pictq[is->pictq_rindex];
00669     if (vp->bmp) {
00670 #if CONFIG_AVFILTER
00671          if (vp->picref->video->sample_aspect_ratio.num == 0)
00672              aspect_ratio = 0;
00673          else
00674              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
00675 #else
00676 
00677         /* XXX: use variable in the frame */
00678         if (is->video_st->sample_aspect_ratio.num)
00679             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
00680         else if (is->video_st->codec->sample_aspect_ratio.num)
00681             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
00682         else
00683             aspect_ratio = 0;
00684 #endif
00685         if (aspect_ratio <= 0.0)
00686             aspect_ratio = 1.0;
00687         aspect_ratio *= (float)vp->width / (float)vp->height;
00688 
00689         if (is->subtitle_st) {
00690             if (is->subpq_size > 0) {
00691                 sp = &is->subpq[is->subpq_rindex];
00692 
00693                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
00694                     SDL_LockYUVOverlay (vp->bmp);
00695 
00696                     pict.data[0] = vp->bmp->pixels[0];
00697                     pict.data[1] = vp->bmp->pixels[2];
00698                     pict.data[2] = vp->bmp->pixels[1];
00699 
00700                     pict.linesize[0] = vp->bmp->pitches[0];
00701                     pict.linesize[1] = vp->bmp->pitches[2];
00702                     pict.linesize[2] = vp->bmp->pitches[1];
00703 
00704                     for (i = 0; i < sp->sub.num_rects; i++)
00705                         blend_subrect(&pict, sp->sub.rects[i],
00706                                       vp->bmp->w, vp->bmp->h);
00707 
00708                     SDL_UnlockYUVOverlay (vp->bmp);
00709                 }
00710             }
00711         }
00712 
00713 
00714         /* XXX: we suppose the screen has a 1.0 pixel ratio */
00715         height = is->height;
00716         width = ((int)rint(height * aspect_ratio)) & ~1;
00717         if (width > is->width) {
00718             width = is->width;
00719             height = ((int)rint(width / aspect_ratio)) & ~1;
00720         }
00721         x = (is->width - width) / 2;
00722         y = (is->height - height) / 2;
00723         is->no_background = 0;
00724         rect.x = is->xleft + x;
00725         rect.y = is->ytop  + y;
00726         rect.w = FFMAX(width,  1);
00727         rect.h = FFMAX(height, 1);
00728         SDL_DisplayYUVOverlay(vp->bmp, &rect);
00729     }
00730 }
00731 
00732 static inline int compute_mod(int a, int b)
00733 {
00734     return a < 0 ? a%b + b : a%b;
00735 }
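/* compute_mod() returns a non-negative remainder even for negative a, which
 * C's % operator does not guarantee: compute_mod(-3, SAMPLE_ARRAY_SIZE) gives
 * SAMPLE_ARRAY_SIZE - 3 instead of -3, so it can index the circular
 * sample_array safely. */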
00736 
00737 static void video_audio_display(VideoState *s)
00738 {
00739     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00740     int ch, channels, h, h2, bgcolor, fgcolor;
00741     int64_t time_diff;
00742     int rdft_bits, nb_freq;
00743 
00744     for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
00745         ;
00746     nb_freq = 1 << (rdft_bits - 1);
00747 
00748     /* compute display index : center on currently output samples */
00749     channels = s->audio_tgt_channels;
00750     nb_display_channels = channels;
00751     if (!s->paused) {
00752         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
00753         n = 2 * channels;
00754         delay = s->audio_write_buf_size;
00755         delay /= n;
00756 
00757         /* to be more precise, we take into account the time spent since
00758            the last buffer computation */
00759         if (audio_callback_time) {
00760             time_diff = av_gettime() - audio_callback_time;
00761             delay -= (time_diff * s->audio_tgt_freq) / 1000000;
00762         }
00763 
00764         delay += 2 * data_used;
00765         if (delay < data_used)
00766             delay = data_used;
00767 
00768         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00769         if (s->show_mode == SHOW_MODE_WAVES) {
00770             h = INT_MIN;
00771             for (i = 0; i < 1000; i += channels) {
00772                 int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00773                 int a = s->sample_array[idx];
00774                 int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
00775                 int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
00776                 int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
00777                 int score = a - d;
00778                 if (h < score && (b ^ c) < 0) {
00779                     h = score;
00780                     i_start = idx;
00781                 }
00782             }
00783         }
00784 
00785         s->last_i_start = i_start;
00786     } else {
00787         i_start = s->last_i_start;
00788     }
00789 
00790     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00791     if (s->show_mode == SHOW_MODE_WAVES) {
00792         fill_rectangle(screen,
00793                        s->xleft, s->ytop, s->width, s->height,
00794                        bgcolor);
00795 
00796         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00797 
00798         /* total height for one channel */
00799         h = s->height / nb_display_channels;
00800         /* graph height / 2 */
00801         h2 = (h * 9) / 20;
00802         for (ch = 0; ch < nb_display_channels; ch++) {
00803             i = i_start + ch;
00804             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
00805             for (x = 0; x < s->width; x++) {
00806                 y = (s->sample_array[i] * h2) >> 15;
00807                 if (y < 0) {
00808                     y = -y;
00809                     ys = y1 - y;
00810                 } else {
00811                     ys = y1;
00812                 }
00813                 fill_rectangle(screen,
00814                                s->xleft + x, ys, 1, y,
00815                                fgcolor);
00816                 i += channels;
00817                 if (i >= SAMPLE_ARRAY_SIZE)
00818                     i -= SAMPLE_ARRAY_SIZE;
00819             }
00820         }
00821 
00822         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00823 
00824         for (ch = 1; ch < nb_display_channels; ch++) {
00825             y = s->ytop + ch * h;
00826             fill_rectangle(screen,
00827                            s->xleft, y, s->width, 1,
00828                            fgcolor);
00829         }
00830         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00831     } else {
00832         nb_display_channels= FFMIN(nb_display_channels, 2);
00833         if (rdft_bits != s->rdft_bits) {
00834             av_rdft_end(s->rdft);
00835             av_free(s->rdft_data);
00836             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00837             s->rdft_bits = rdft_bits;
00838             s->rdft_data = av_malloc(4 * nb_freq * sizeof(*s->rdft_data));
00839         }
00840         {
00841             FFTSample *data[2];
00842             for (ch = 0; ch < nb_display_channels; ch++) {
00843                 data[ch] = s->rdft_data + 2 * nb_freq * ch;
00844                 i = i_start + ch;
00845                 for (x = 0; x < 2 * nb_freq; x++) {
00846                     double w = (x-nb_freq) * (1.0 / nb_freq);
00847                     data[ch][x] = s->sample_array[i] * (1.0 - w * w);
00848                     i += channels;
00849                     if (i >= SAMPLE_ARRAY_SIZE)
00850                         i -= SAMPLE_ARRAY_SIZE;
00851                 }
00852                 av_rdft_calc(s->rdft, data[ch]);
00853             }
00854             // Least efficient way to do this; we should directly access the data, but it's more than fast enough
00855             for (y = 0; y < s->height; y++) {
00856                 double w = 1 / sqrt(nb_freq);
00857                 int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
00858                 int b = (nb_display_channels == 2 ) ? sqrt(w * sqrt(data[1][2 * y + 0] * data[1][2 * y + 0]
00859                        + data[1][2 * y + 1] * data[1][2 * y + 1])) : a;
00860                 a = FFMIN(a, 255);
00861                 b = FFMIN(b, 255);
00862                 fgcolor = SDL_MapRGB(screen->format, a, b, (a + b) / 2);
00863 
00864                 fill_rectangle(screen,
00865                             s->xpos, s->height-y, 1, 1,
00866                             fgcolor);
00867             }
00868         }
00869         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00870         s->xpos++;
00871         if (s->xpos >= s->width)
00872             s->xpos= s->xleft;
00873     }
00874 }
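/* In SHOW_MODE_WAVES the function above draws one oscilloscope trace per
 * channel from sample_array, centred on the samples currently being played
 * (hence the delay estimate from audio_write_buf_size and audio_callback_time);
 * in SHOW_MODE_RDFT it windows 2*nb_freq samples per channel, runs a real DFT
 * and paints one spectrogram column per refresh, advancing xpos across the
 * window. */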
00875 
00876 static void stream_close(VideoState *is)
00877 {
00878     VideoPicture *vp;
00879     int i;
00880     /* XXX: use a special url_shutdown call to abort parse cleanly */
00881     is->abort_request = 1;
00882     SDL_WaitThread(is->read_tid, NULL);
00883     SDL_WaitThread(is->refresh_tid, NULL);
00884 
00885     /* free all pictures */
00886     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
00887         vp = &is->pictq[i];
00888 #if CONFIG_AVFILTER
00889         if (vp->picref) {
00890             avfilter_unref_buffer(vp->picref);
00891             vp->picref = NULL;
00892         }
00893 #endif
00894         if (vp->bmp) {
00895             SDL_FreeYUVOverlay(vp->bmp);
00896             vp->bmp = NULL;
00897         }
00898     }
00899     SDL_DestroyMutex(is->pictq_mutex);
00900     SDL_DestroyCond(is->pictq_cond);
00901     SDL_DestroyMutex(is->subpq_mutex);
00902     SDL_DestroyCond(is->subpq_cond);
00903 #if !CONFIG_AVFILTER
00904     if (is->img_convert_ctx)
00905         sws_freeContext(is->img_convert_ctx);
00906 #endif
00907     av_free(is);
00908 }
00909 
00910 static void do_exit(VideoState *is)
00911 {
00912     if (is) {
00913         stream_close(is);
00914     }
00915     av_lockmgr_register(NULL);
00916     uninit_opts();
00917 #if CONFIG_AVFILTER
00918     avfilter_uninit();
00919 #endif
00920     avformat_network_deinit();
00921     if (show_status)
00922         printf("\n");
00923     SDL_Quit();
00924     av_log(NULL, AV_LOG_QUIET, "%s", "");
00925     exit(0);
00926 }
00927 
00928 static void sigterm_handler(int sig)
00929 {
00930     exit(123);
00931 }
00932 
00933 static int video_open(VideoState *is, int force_set_video_mode)
00934 {
00935     int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;
00936     int w,h;
00937 
00938     if (is_full_screen) flags |= SDL_FULLSCREEN;
00939     else                flags |= SDL_RESIZABLE;
00940 
00941     if (is_full_screen && fs_screen_width) {
00942         w = fs_screen_width;
00943         h = fs_screen_height;
00944     } else if (!is_full_screen && screen_width) {
00945         w = screen_width;
00946         h = screen_height;
00947 #if CONFIG_AVFILTER
00948     } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
00949         w = is->out_video_filter->inputs[0]->w;
00950         h = is->out_video_filter->inputs[0]->h;
00951 #else
00952     } else if (is->video_st && is->video_st->codec->width) {
00953         w = is->video_st->codec->width;
00954         h = is->video_st->codec->height;
00955 #endif
00956     } else {
00957         w = 640;
00958         h = 480;
00959     }
00960     if (screen && is->width == screen->w && screen->w == w
00961        && is->height== screen->h && screen->h == h && !force_set_video_mode)
00962         return 0;
00963     screen = SDL_SetVideoMode(w, h, 0, flags);
00964     if (!screen) {
00965         fprintf(stderr, "SDL: could not set video mode - exiting\n");
00966         do_exit(is);
00967     }
00968     if (!window_title)
00969         window_title = input_filename;
00970     SDL_WM_SetCaption(window_title, window_title);
00971 
00972     is->width  = screen->w;
00973     is->height = screen->h;
00974 
00975     return 0;
00976 }
00977 
00978 /* display the current picture, if any */
00979 static void video_display(VideoState *is)
00980 {
00981     if (!screen)
00982         video_open(is, 0);
00983     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
00984         video_audio_display(is);
00985     else if (is->video_st)
00986         video_image_display(is);
00987 }
00988 
00989 static int refresh_thread(void *opaque)
00990 {
00991     VideoState *is= opaque;
00992     while (!is->abort_request) {
00993         SDL_Event event;
00994         event.type = FF_REFRESH_EVENT;
00995         event.user.data1 = opaque;
00996         if (!is->refresh) {
00997             is->refresh = 1;
00998             SDL_PushEvent(&event);
00999         }
01000         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
01001         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
01002     }
01003     return 0;
01004 }
01005 
01006 /* get the current audio clock value */
01007 static double get_audio_clock(VideoState *is)
01008 {
01009     if (is->paused) {
01010         return is->audio_current_pts;
01011     } else {
01012         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
01013     }
01014 }
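/* Clock bookkeeping trick used for both the audio and video clocks: whenever a
 * new pts is adopted, the code stores pts - av_gettime()/1e6 as a "drift", so
 * that while playing the clock can be read at any moment as drift + now and
 * keeps advancing in real time between updates; while paused the last absolute
 * pts is returned unchanged. */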
01015 
01016 /* get the current video clock value */
01017 static double get_video_clock(VideoState *is)
01018 {
01019     if (is->paused) {
01020         return is->video_current_pts;
01021     } else {
01022         return is->video_current_pts_drift + av_gettime() / 1000000.0;
01023     }
01024 }
01025 
01026 /* get the current external clock value */
01027 static double get_external_clock(VideoState *is)
01028 {
01029     int64_t ti;
01030     ti = av_gettime();
01031     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
01032 }
01033 
01034 /* get the current master clock value */
01035 static double get_master_clock(VideoState *is)
01036 {
01037     double val;
01038 
01039     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
01040         if (is->video_st)
01041             val = get_video_clock(is);
01042         else
01043             val = get_audio_clock(is);
01044     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01045         if (is->audio_st)
01046             val = get_audio_clock(is);
01047         else
01048             val = get_video_clock(is);
01049     } else {
01050         val = get_external_clock(is);
01051     }
01052     return val;
01053 }
01054 
01055 /* seek in the stream */
01056 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01057 {
01058     if (!is->seek_req) {
01059         is->seek_pos = pos;
01060         is->seek_rel = rel;
01061         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01062         if (seek_by_bytes)
01063             is->seek_flags |= AVSEEK_FLAG_BYTE;
01064         is->seek_req = 1;
01065     }
01066 }
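/* stream_seek() only records the request; the actual avformat_seek_file() call
 * happens in the read thread, which then flushes the packet queues and pushes
 * flush_pkt into each of them so the decoders discard their buffered state. */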
01067 
01068 /* pause or resume the video */
01069 static void stream_toggle_pause(VideoState *is)
01070 {
01071     if (is->paused) {
01072         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
01073         if (is->read_pause_return != AVERROR(ENOSYS)) {
01074             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
01075         }
01076         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
01077     }
01078     is->paused = !is->paused;
01079 }
01080 
01081 static double compute_target_delay(double delay, VideoState *is)
01082 {
01083     double sync_threshold, diff;
01084 
01085     /* update delay to follow master synchronisation source */
01086     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01087          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01088         /* if video is slave, we try to correct big delays by
01089            duplicating or deleting a frame */
01090         diff = get_video_clock(is) - get_master_clock(is);
01091 
01092         /* skip or repeat frame. We take into account the
01093            delay to compute the threshold. I still don't know
01094            if it is the best guess */
01095         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01096         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01097             if (diff <= -sync_threshold)
01098                 delay = 0;
01099             else if (diff >= sync_threshold)
01100                 delay = 2 * delay;
01101         }
01102     }
01103 
01104     av_dlog(NULL, "video: delay=%0.3f A-V=%f\n",
01105             delay, -diff);
01106 
01107     return delay;
01108 }
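/* Worked example of the rule above, assuming a nominal frame delay of 40 ms:
 * sync_threshold becomes 40 ms, so if the video clock trails the master by
 * 100 ms (diff = -0.1) the delay is forced to 0 and the next frame is shown
 * immediately, while if the video leads by 100 ms the delay is doubled to
 * 80 ms; differences beyond AV_NOSYNC_THRESHOLD are left uncorrected. */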
01109 
01110 static void pictq_next_picture(VideoState *is) {
01111     /* update queue size and signal for next picture */
01112     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01113         is->pictq_rindex = 0;
01114 
01115     SDL_LockMutex(is->pictq_mutex);
01116     is->pictq_size--;
01117     SDL_CondSignal(is->pictq_cond);
01118     SDL_UnlockMutex(is->pictq_mutex);
01119 }
01120 
01121 static void update_video_pts(VideoState *is, double pts, int64_t pos) {
01122     double time = av_gettime() / 1000000.0;
01123     /* update current video pts */
01124     is->video_current_pts = pts;
01125     is->video_current_pts_drift = is->video_current_pts - time;
01126     is->video_current_pos = pos;
01127     is->frame_last_pts = pts;
01128 }
01129 
01130 /* called to display each frame */
01131 static void video_refresh(void *opaque)
01132 {
01133     VideoState *is = opaque;
01134     VideoPicture *vp;
01135     double time;
01136 
01137     SubPicture *sp, *sp2;
01138 
01139     if (is->video_st) {
01140 retry:
01141         if (is->pictq_size == 0) {
01142             SDL_LockMutex(is->pictq_mutex);
01143             if (is->frame_last_dropped_pts != AV_NOPTS_VALUE && is->frame_last_dropped_pts > is->frame_last_pts) {
01144                 update_video_pts(is, is->frame_last_dropped_pts, is->frame_last_dropped_pos);
01145                 is->frame_last_dropped_pts = AV_NOPTS_VALUE;
01146             }
01147             SDL_UnlockMutex(is->pictq_mutex);
01148             // nothing to do, no picture to display in the queue
01149         } else {
01150             double last_duration, duration, delay;
01151             /* dequeue the picture */
01152             vp = &is->pictq[is->pictq_rindex];
01153 
01154             if (vp->skip) {
01155                 pictq_next_picture(is);
01156                 goto retry;
01157             }
01158 
01159             /* compute nominal last_duration */
01160             last_duration = vp->pts - is->frame_last_pts;
01161             if (last_duration > 0 && last_duration < 10.0) {
01162                 /* if duration of the last frame was sane, update last_duration in video state */
01163                 is->frame_last_duration = last_duration;
01164             }
01165             delay = compute_target_delay(is->frame_last_duration, is);
01166 
01167             time= av_gettime()/1000000.0;
01168             if (time < is->frame_timer + delay)
01169                 return;
01170 
01171             if (delay > 0)
01172                 is->frame_timer += delay * FFMAX(1, floor((time-is->frame_timer) / delay));
01173 
01174             SDL_LockMutex(is->pictq_mutex);
01175             update_video_pts(is, vp->pts, vp->pos);
01176             SDL_UnlockMutex(is->pictq_mutex);
01177 
01178             if (is->pictq_size > 1) {
01179                 VideoPicture *nextvp = &is->pictq[(is->pictq_rindex + 1) % VIDEO_PICTURE_QUEUE_SIZE];
01180                 duration = nextvp->pts - vp->pts; // More accurate this way; 1/time_base often does not reflect the FPS
01181             } else {
01182                 duration = vp->duration;
01183             }
01184 
01185             if((framedrop>0 || (framedrop && is->audio_st)) && time > is->frame_timer + duration){
01186                 if(is->pictq_size > 1){
01187                     is->frame_drops_late++;
01188                     pictq_next_picture(is);
01189                     goto retry;
01190                 }
01191             }
01192 
01193             if (is->subtitle_st) {
01194                 if (is->subtitle_stream_changed) {
01195                     SDL_LockMutex(is->subpq_mutex);
01196 
01197                     while (is->subpq_size) {
01198                         free_subpicture(&is->subpq[is->subpq_rindex]);
01199 
01200                         /* update queue size and signal for next picture */
01201                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01202                             is->subpq_rindex = 0;
01203 
01204                         is->subpq_size--;
01205                     }
01206                     is->subtitle_stream_changed = 0;
01207 
01208                     SDL_CondSignal(is->subpq_cond);
01209                     SDL_UnlockMutex(is->subpq_mutex);
01210                 } else {
01211                     if (is->subpq_size > 0) {
01212                         sp = &is->subpq[is->subpq_rindex];
01213 
01214                         if (is->subpq_size > 1)
01215                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
01216                         else
01217                             sp2 = NULL;
01218 
01219                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
01220                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
01221                         {
01222                             free_subpicture(sp);
01223 
01224                             /* update queue size and signal for next picture */
01225                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01226                                 is->subpq_rindex = 0;
01227 
01228                             SDL_LockMutex(is->subpq_mutex);
01229                             is->subpq_size--;
01230                             SDL_CondSignal(is->subpq_cond);
01231                             SDL_UnlockMutex(is->subpq_mutex);
01232                         }
01233                     }
01234                 }
01235             }
01236 
01237             /* display picture */
01238             if (!display_disable)
01239                 video_display(is);
01240 
01241             pictq_next_picture(is);
01242         }
01243     } else if (is->audio_st) {
01244         /* draw the next audio frame */
01245 
01246         /* if there is only an audio stream, then display the audio bars (better
01247            than nothing, just to test the implementation) */
01248 
01249         /* display picture */
01250         if (!display_disable)
01251             video_display(is);
01252     }
01253     if (show_status) {
01254         static int64_t last_time;
01255         int64_t cur_time;
01256         int aqsize, vqsize, sqsize;
01257         double av_diff;
01258 
01259         cur_time = av_gettime();
01260         if (!last_time || (cur_time - last_time) >= 30000) {
01261             aqsize = 0;
01262             vqsize = 0;
01263             sqsize = 0;
01264             if (is->audio_st)
01265                 aqsize = is->audioq.size;
01266             if (is->video_st)
01267                 vqsize = is->videoq.size;
01268             if (is->subtitle_st)
01269                 sqsize = is->subtitleq.size;
01270             av_diff = 0;
01271             if (is->audio_st && is->video_st)
01272                 av_diff = get_audio_clock(is) - get_video_clock(is);
01273             printf("%7.2f A-V:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
01274                    get_master_clock(is),
01275                    av_diff,
01276                    is->frame_drops_early + is->frame_drops_late,
01277                    aqsize / 1024,
01278                    vqsize / 1024,
01279                    sqsize,
01280                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
01281                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
01282             fflush(stdout);
01283             last_time = cur_time;
01284         }
01285     }
01286 }
01287 
01288 /* allocate a picture (this must be done in the main thread to avoid
01289    potential locking problems) */
01290 static void alloc_picture(void *opaque)
01291 {
01292     VideoState *is = opaque;
01293     VideoPicture *vp;
01294 
01295     vp = &is->pictq[is->pictq_windex];
01296 
01297     if (vp->bmp)
01298         SDL_FreeYUVOverlay(vp->bmp);
01299 
01300 #if CONFIG_AVFILTER
01301     if (vp->picref)
01302         avfilter_unref_buffer(vp->picref);
01303     vp->picref = NULL;
01304 
01305     vp->width   = is->out_video_filter->inputs[0]->w;
01306     vp->height  = is->out_video_filter->inputs[0]->h;
01307     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01308 #else
01309     vp->width   = is->video_st->codec->width;
01310     vp->height  = is->video_st->codec->height;
01311     vp->pix_fmt = is->video_st->codec->pix_fmt;
01312 #endif
01313 
01314     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01315                                    SDL_YV12_OVERLAY,
01316                                    screen);
01317     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
01318         /* SDL allocates a buffer smaller than requested if the video
01319          * overlay hardware is unable to support the requested size. */
01320         fprintf(stderr, "Error: the video system does not support an image\n"
01321                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
01322                         "to reduce the image size.\n", vp->width, vp->height );
01323         do_exit(is);
01324     }
01325 
01326     SDL_LockMutex(is->pictq_mutex);
01327     vp->allocated = 1;
01328     SDL_CondSignal(is->pictq_cond);
01329     SDL_UnlockMutex(is->pictq_mutex);
01330 }
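/* alloc_picture() runs in the main event-loop thread: queue_picture() below
 * posts FF_ALLOC_EVENT and waits on pictq_cond until vp->allocated is set here,
 * because SDL 1.2 surface/overlay calls are only safe from the thread that set
 * the video mode. */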
01331 
01332 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01333 {
01334     VideoPicture *vp;
01335     double frame_delay, pts = pts1;
01336 
01337     /* compute the exact PTS for the picture if it is omitted in the stream
01338      * pts1 is the dts of the pkt / pts of the frame */
01339     if (pts != 0) {
01340         /* update video clock with pts, if present */
01341         is->video_clock = pts;
01342     } else {
01343         pts = is->video_clock;
01344     }
01345     /* update video clock for next frame */
01346     frame_delay = av_q2d(is->video_st->codec->time_base);
01347     /* for MPEG2, the frame can be repeated, so we update the
01348        clock accordingly */
01349     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01350     is->video_clock += frame_delay;
01351 
01352 #if defined(DEBUG_SYNC) && 0
01353     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
01354            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
01355 #endif
01356 
01357     /* wait until we have space to put a new picture */
01358     SDL_LockMutex(is->pictq_mutex);
01359 
01360     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
01361            !is->videoq.abort_request) {
01362         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01363     }
01364     SDL_UnlockMutex(is->pictq_mutex);
01365 
01366     if (is->videoq.abort_request)
01367         return -1;
01368 
01369     vp = &is->pictq[is->pictq_windex];
01370 
01371     vp->duration = frame_delay;
01372 
01373     /* alloc or resize hardware picture buffer */
01374     if (!vp->bmp || vp->reallocate ||
01375 #if CONFIG_AVFILTER
01376         vp->width  != is->out_video_filter->inputs[0]->w ||
01377         vp->height != is->out_video_filter->inputs[0]->h) {
01378 #else
01379         vp->width != is->video_st->codec->width ||
01380         vp->height != is->video_st->codec->height) {
01381 #endif
01382         SDL_Event event;
01383 
01384         vp->allocated  = 0;
01385         vp->reallocate = 0;
01386 
01387         /* the allocation must be done in the main thread to avoid
01388            locking problems */
01389         event.type = FF_ALLOC_EVENT;
01390         event.user.data1 = is;
01391         SDL_PushEvent(&event);
01392 
01393         /* wait until the picture is allocated */
01394         SDL_LockMutex(is->pictq_mutex);
01395         while (!vp->allocated && !is->videoq.abort_request) {
01396             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01397         }
01398         /* if the queue is aborted, we have to pop the pending ALLOC event or wait for the allocation to complete */
01399         if (is->videoq.abort_request && SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_EVENTMASK(FF_ALLOC_EVENT)) != 1) {
01400             while (!vp->allocated) {
01401                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01402             }
01403         }
01404         SDL_UnlockMutex(is->pictq_mutex);
01405 
01406         if (is->videoq.abort_request)
01407             return -1;
01408     }
01409 
01410     /* if the frame is not skipped, then display it */
01411     if (vp->bmp) {
01412         AVPicture pict;
01413 #if CONFIG_AVFILTER
01414         if (vp->picref)
01415             avfilter_unref_buffer(vp->picref);
01416         vp->picref = src_frame->opaque;
01417 #endif
01418 
01419         /* get a pointer on the bitmap */
01420         SDL_LockYUVOverlay (vp->bmp);
01421 
01422         memset(&pict, 0, sizeof(AVPicture));
01423         pict.data[0] = vp->bmp->pixels[0];
01424         pict.data[1] = vp->bmp->pixels[2];
01425         pict.data[2] = vp->bmp->pixels[1];
01426 
01427         pict.linesize[0] = vp->bmp->pitches[0];
01428         pict.linesize[1] = vp->bmp->pitches[2];
01429         pict.linesize[2] = vp->bmp->pitches[1];
01430 
01431 #if CONFIG_AVFILTER
01432         // FIXME use direct rendering
01433         av_picture_copy(&pict, (AVPicture *)src_frame,
01434                         vp->pix_fmt, vp->width, vp->height);
01435 #else
01436         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
01437         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
01438             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
01439             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
01440         if (is->img_convert_ctx == NULL) {
01441             fprintf(stderr, "Cannot initialize the conversion context\n");
01442             exit(1);
01443         }
01444         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
01445                   0, vp->height, pict.data, pict.linesize);
01446 #endif
01447         /* update the bitmap content */
01448         SDL_UnlockYUVOverlay(vp->bmp);
01449 
01450         vp->pts = pts;
01451         vp->pos = pos;
01452         vp->skip = 0;
01453 
01454         /* now we can update the picture count */
01455         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
01456             is->pictq_windex = 0;
01457         SDL_LockMutex(is->pictq_mutex);
01458         is->pictq_size++;
01459         SDL_UnlockMutex(is->pictq_mutex);
01460     }
01461     return 0;
01462 }
01463 
01464 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01465 {
01466     int got_picture, i;
01467 
01468     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01469         return -1;
01470 
01471     if (pkt->data == flush_pkt.data) {
01472         avcodec_flush_buffers(is->video_st->codec);
01473 
01474         SDL_LockMutex(is->pictq_mutex);
01475         // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
01476         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01477             is->pictq[i].skip = 1;
01478         }
01479         while (is->pictq_size && !is->videoq.abort_request) {
01480             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01481         }
01482         is->video_current_pos = -1;
01483         is->frame_last_pts = AV_NOPTS_VALUE;
01484         is->frame_last_duration = 0;
01485         is->frame_timer = (double)av_gettime() / 1000000.0;
01486         is->frame_last_dropped_pts = AV_NOPTS_VALUE;
01487         SDL_UnlockMutex(is->pictq_mutex);
01488 
01489         return 0;
01490     }
01491 
01492     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
01493 
01494     if (got_picture) {
01495         int ret = 1;
01496 
01497         if (decoder_reorder_pts == -1) {
01498             *pts = *(int64_t*)av_opt_ptr(avcodec_get_frame_class(), frame, "best_effort_timestamp");
01499         } else if (decoder_reorder_pts) {
01500             *pts = frame->pkt_pts;
01501         } else {
01502             *pts = frame->pkt_dts;
01503         }
01504 
01505         if (*pts == AV_NOPTS_VALUE) {
01506             *pts = 0;
01507         }
01508 
01509         if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) || is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
01510              (framedrop>0 || (framedrop && is->audio_st))) {
01511             SDL_LockMutex(is->pictq_mutex);
01512             if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
01513                 double clockdiff = get_video_clock(is) - get_master_clock(is);
01514                 double dpts = av_q2d(is->video_st->time_base) * *pts;
01515                 double ptsdiff = dpts - is->frame_last_pts;
01516                 if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
01517                      ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
01518                      clockdiff + ptsdiff - is->frame_last_filter_delay < 0) {
01519                     is->frame_last_dropped_pos = pkt->pos;
01520                     is->frame_last_dropped_pts = dpts;
01521                     is->frame_drops_early++;
01522                     ret = 0;
01523                 }
01524             }
01525             SDL_UnlockMutex(is->pictq_mutex);
01526         }
01527 
01528         if (ret)
01529             is->frame_last_returned_time = av_gettime() / 1000000.0;
01530 
01531         return ret;
01532     }
01533     return 0;
01534 }
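/* Early frame dropping (frame_drops_early): when video is not the master clock
 * and framedrop is enabled, a decoded frame whose pts would still leave the
 * video behind the master clock, even after accounting for the last filter
 * delay, is discarded right here, before the costly conversion and upload in
 * queue_picture(); frames that turn stale after being queued are dropped later
 * in video_refresh() (frame_drops_late). */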
01535 
01536 #if CONFIG_AVFILTER
01537 typedef struct {
01538     VideoState *is;
01539     AVFrame *frame;
01540     int use_dr1;
01541 } FilterPriv;
01542 
01543 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
01544 {
01545     AVFilterContext *ctx = codec->opaque;
01546     AVFilterBufferRef  *ref;
01547     int perms = AV_PERM_WRITE;
01548     int i, w, h, stride[4];
01549     unsigned edge;
01550     int pixel_size;
01551 
01552     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
01553 
01554     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
01555         perms |= AV_PERM_NEG_LINESIZES;
01556 
01557     if (pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
01558         if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
01559         if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
01560         if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
01561     }
01562     if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
01563 
01564     w = codec->width;
01565     h = codec->height;
01566 
01567     if(av_image_check_size(w, h, 0, codec))
01568         return -1;
01569 
01570     avcodec_align_dimensions2(codec, &w, &h, stride);
01571     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
01572     w += edge << 1;
01573     h += edge << 1;
01574     if (codec->pix_fmt != ctx->outputs[0]->format) {
01575         av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
01576         return -1;
01577     }
01578     if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
01579         return -1;
01580 
01581     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
01582     ref->video->w = codec->width;
01583     ref->video->h = codec->height;
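          /* The buffer was allocated with extra edge pixels on each side; advance the
           * plane pointers past that border (using the chroma shifts for subsampled
           * planes) so the decoder sees only the visible area. */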
01584     for (i = 0; i < 4; i ++) {
01585         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
01586         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
01587 
01588         if (ref->data[i]) {
01589             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
01590         }
01591         pic->data[i]     = ref->data[i];
01592         pic->linesize[i] = ref->linesize[i];
01593     }
01594     pic->opaque = ref;
01595     pic->type   = FF_BUFFER_TYPE_USER;
01596     pic->reordered_opaque = codec->reordered_opaque;
01597     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
01598     else            pic->pkt_pts = AV_NOPTS_VALUE;
01599     return 0;
01600 }
01601 
01602 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01603 {
01604     memset(pic->data, 0, sizeof(pic->data));
01605     avfilter_unref_buffer(pic->opaque);
01606 }
01607 
01608 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
01609 {
01610     AVFilterBufferRef *ref = pic->opaque;
01611 
01612     if (pic->data[0] == NULL) {
01613         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
01614         return codec->get_buffer(codec, pic);
01615     }
01616 
01617     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
01618         (codec->pix_fmt != ref->format)) {
01619         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
01620         return -1;
01621     }
01622 
01623     pic->reordered_opaque = codec->reordered_opaque;
01624     if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
01625     else            pic->pkt_pts = AV_NOPTS_VALUE;
01626     return 0;
01627 }
01628 
01629 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01630 {
01631     FilterPriv *priv = ctx->priv;
01632     AVCodecContext *codec;
01633     if (!opaque) return -1;
01634 
01635     priv->is = opaque;
01636     codec    = priv->is->video_st->codec;
01637     codec->opaque = ctx;
01638     if (codec->codec->capabilities & CODEC_CAP_DR1) {
01639         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
01640         priv->use_dr1 = 1;
01641         codec->get_buffer     = input_get_buffer;
01642         codec->release_buffer = input_release_buffer;
01643         codec->reget_buffer   = input_reget_buffer;
01644         codec->thread_safe_callbacks = 1;
01645     }
01646 
01647     priv->frame = avcodec_alloc_frame();
01648 
01649     return 0;
01650 }
01651 
01652 static void input_uninit(AVFilterContext *ctx)
01653 {
01654     FilterPriv *priv = ctx->priv;
01655     av_free(priv->frame);
01656 }
01657 
01658 static int input_request_frame(AVFilterLink *link)
01659 {
01660     FilterPriv *priv = link->src->priv;
01661     AVFilterBufferRef *picref;
01662     int64_t pts = 0;
01663     AVPacket pkt;
01664     int ret;
01665 
01666     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
01667         av_free_packet(&pkt);
01668     if (ret < 0)
01669         return -1;
01670 
01671     if (priv->use_dr1 && priv->frame->opaque) {
01672         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
01673     } else {
01674         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
01675         av_image_copy(picref->data, picref->linesize,
01676                       priv->frame->data, priv->frame->linesize,
01677                       picref->format, link->w, link->h);
01678     }
01679     av_free_packet(&pkt);
01680 
01681     avfilter_copy_frame_props(picref, priv->frame);
01682     picref->pts = pts;
01683 
01684     avfilter_start_frame(link, picref);
01685     avfilter_draw_slice(link, 0, link->h, 1);
01686     avfilter_end_frame(link);
01687 
01688     return 0;
01689 }
01690 
01691 static int input_query_formats(AVFilterContext *ctx)
01692 {
01693     FilterPriv *priv = ctx->priv;
01694     enum PixelFormat pix_fmts[] = {
01695         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01696     };
01697 
01698     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
01699     return 0;
01700 }
01701 
01702 static int input_config_props(AVFilterLink *link)
01703 {
01704     FilterPriv *priv  = link->src->priv;
01705     AVStream *s = priv->is->video_st;
01706 
01707     link->w = s->codec->width;
01708     link->h = s->codec->height;
01709     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
01710         s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
01711     link->time_base = s->time_base;
01712 
01713     return 0;
01714 }
01715 
01716 static AVFilter input_filter =
01717 {
01718     .name      = "ffplay_input",
01719 
01720     .priv_size = sizeof(FilterPriv),
01721 
01722     .init      = input_init,
01723     .uninit    = input_uninit,
01724 
01725     .query_formats = input_query_formats,
01726 
01727     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
01728     .outputs   = (AVFilterPad[]) {{ .name = "default",
01729                                     .type = AVMEDIA_TYPE_VIDEO,
01730                                     .request_frame = input_request_frame,
01731                                     .config_props  = input_config_props, },
01732                                   { .name = NULL }},
01733 };
01734 
01735 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
01736 {
01737     char sws_flags_str[128];
01738     int ret;
01739     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
01740     AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();
01741     AVFilterContext *filt_src = NULL, *filt_out = NULL;
01742     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
01743     graph->scale_sws_opts = av_strdup(sws_flags_str);
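          /* Graph layout: the "ffplay_input" source defined above feeds either the
           * user-supplied filter chain (vfilters) or a direct link into a buffersink
           * whose output is forced to PIX_FMT_YUV420P for display. */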
01744 
01745     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
01746                                             NULL, is, graph)) < 0)
01747         return ret;
01748 
01749 #if FF_API_OLD_VSINK_API
01750     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
01751                                        NULL, pix_fmts, graph);
01752 #else
01753     buffersink_params->pixel_fmts = pix_fmts;
01754     ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
01755                                        NULL, buffersink_params, graph);
01756 #endif
01757     av_freep(&buffersink_params);
01758     if (ret < 0)
01759         return ret;
01760 
01761     if (vfilters) {
01762         AVFilterInOut *outputs = avfilter_inout_alloc();
01763         AVFilterInOut *inputs  = avfilter_inout_alloc();
01764 
01765         outputs->name    = av_strdup("in");
01766         outputs->filter_ctx = filt_src;
01767         outputs->pad_idx = 0;
01768         outputs->next    = NULL;
01769 
01770         inputs->name    = av_strdup("out");
01771         inputs->filter_ctx = filt_out;
01772         inputs->pad_idx = 0;
01773         inputs->next    = NULL;
01774 
01775         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
01776             return ret;
01777     } else {
01778         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
01779             return ret;
01780     }
01781 
01782     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
01783         return ret;
01784 
01785     is->out_video_filter = filt_out;
01786 
01787     return ret;
01788 }
01789 
01790 #endif  /* CONFIG_AVFILTER */
01791 
01792 static int video_thread(void *arg)
01793 {
01794     VideoState *is = arg;
01795     AVFrame *frame = avcodec_alloc_frame();
01796     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
01797     double pts;
01798     int ret;
01799 
01800 #if CONFIG_AVFILTER
01801     AVFilterGraph *graph = avfilter_graph_alloc();
01802     AVFilterContext *filt_out = NULL;
01803     int last_w = is->video_st->codec->width;
01804     int last_h = is->video_st->codec->height;
01805 
01806     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01807         goto the_end;
01808     filt_out = is->out_video_filter;
01809 #endif
01810 
01811     for (;;) {
01812 #if !CONFIG_AVFILTER
01813         AVPacket pkt;
01814 #else
01815         AVFilterBufferRef *picref;
01816         AVRational tb = filt_out->inputs[0]->time_base;
01817 #endif
01818         while (is->paused && !is->videoq.abort_request)
01819             SDL_Delay(10);
01820 #if CONFIG_AVFILTER
01821         if (   last_w != is->video_st->codec->width
01822             || last_h != is->video_st->codec->height) {
01823             av_log(NULL, AV_LOG_INFO, "Frame changed from size:%dx%d to size:%dx%d\n",
01824                    last_w, last_h, is->video_st->codec->width, is->video_st->codec->height);
01825             avfilter_graph_free(&graph);
01826             graph = avfilter_graph_alloc();
01827             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01828                 goto the_end;
01829             filt_out = is->out_video_filter;
01830             last_w = is->video_st->codec->width;
01831             last_h = is->video_st->codec->height;
01832         }
01833         ret = av_buffersink_get_buffer_ref(filt_out, &picref, 0);
01834         if (picref) {
01835             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
01836             pts_int = picref->pts;
01837             pos     = picref->pos;
01838             frame->opaque = picref;
01839         }
01840 
01841         if (av_cmp_q(tb, is->video_st->time_base)) {
01842             av_unused int64_t pts1 = pts_int;
01843             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
01844             av_dlog(NULL, "video_thread(): "
01845                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
01846                     tb.num, tb.den, pts1,
01847                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
01848         }
01849 #else
01850         ret = get_video_frame(is, frame, &pts_int, &pkt);
01851         pos = pkt.pos;
01852         av_free_packet(&pkt);
01853 #endif
01854 
01855         if (ret < 0)
01856             goto the_end;
01857 
01858         is->frame_last_filter_delay = av_gettime() / 1000000.0 - is->frame_last_returned_time;
01859         if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
01860             is->frame_last_filter_delay = 0;
01861 
01862 #if CONFIG_AVFILTER
01863         if (!picref)
01864             continue;
01865 #endif
01866 
01867         pts = pts_int * av_q2d(is->video_st->time_base);
01868 
01869         ret = queue_picture(is, frame, pts, pos);
01870 
01871         if (ret < 0)
01872             goto the_end;
01873 
01874         if (is->step)
01875             stream_toggle_pause(is);
01876     }
01877  the_end:
01878 #if CONFIG_AVFILTER
01879     avfilter_graph_free(&graph);
01880 #endif
01881     av_free(frame);
01882     return 0;
01883 }
01884 
01885 static int subtitle_thread(void *arg)
01886 {
01887     VideoState *is = arg;
01888     SubPicture *sp;
01889     AVPacket pkt1, *pkt = &pkt1;
01890     int got_subtitle;
01891     double pts;
01892     int i, j;
01893     int r, g, b, y, u, v, a;
01894 
01895     for (;;) {
01896         while (is->paused && !is->subtitleq.abort_request) {
01897             SDL_Delay(10);
01898         }
01899         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01900             break;
01901 
01902         if (pkt->data == flush_pkt.data) {
01903             avcodec_flush_buffers(is->subtitle_st->codec);
01904             continue;
01905         }
01906         SDL_LockMutex(is->subpq_mutex);
01907         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01908                !is->subtitleq.abort_request) {
01909             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01910         }
01911         SDL_UnlockMutex(is->subpq_mutex);
01912 
01913         if (is->subtitleq.abort_request)
01914             return 0;
01915 
01916         sp = &is->subpq[is->subpq_windex];
01917 
01918         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
01919            this packet, if any */
01920         pts = 0;
01921         if (pkt->pts != AV_NOPTS_VALUE)
01922             pts = av_q2d(is->subtitle_st->time_base) * pkt->pts;
01923 
01924         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
01925                                  &got_subtitle, pkt);
01926 
01927         if (got_subtitle && sp->sub.format == 0) {
01928             sp->pts = pts;
01929 
01930             for (i = 0; i < sp->sub.num_rects; i++)
01931             {
01932                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01933                 {
01934                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01935                     y = RGB_TO_Y_CCIR(r, g, b);
01936                     u = RGB_TO_U_CCIR(r, g, b, 0);
01937                     v = RGB_TO_V_CCIR(r, g, b, 0);
01938                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01939                 }
01940             }
01941 
01942             /* now we can update the picture count */
01943             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01944                 is->subpq_windex = 0;
01945             SDL_LockMutex(is->subpq_mutex);
01946             is->subpq_size++;
01947             SDL_UnlockMutex(is->subpq_mutex);
01948         }
01949         av_free_packet(pkt);
01950     }
01951     return 0;
01952 }
01953 
01954 /* copy samples into the circular buffer used by the waveform/spectrum display */
01955 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01956 {
01957     int size, len;
01958 
01959     size = samples_size / sizeof(short);
01960     while (size > 0) {
01961         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01962         if (len > size)
01963             len = size;
01964         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01965         samples += len;
01966         is->sample_array_index += len;
01967         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01968             is->sample_array_index = 0;
01969         size -= len;
01970     }
01971 }
01972 
01973 /* return the wanted number of samples to get better sync if sync_type is video
01974  * or external master clock */
01975 static int synchronize_audio(VideoState *is, int nb_samples)
01976 {
01977     int wanted_nb_samples = nb_samples;
01978 
01979     /* if not master, then we try to remove or add samples to correct the clock */
01980     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01981          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01982         double diff, avg_diff;
01983         int min_nb_samples, max_nb_samples;
01984 
01985         diff = get_audio_clock(is) - get_master_clock(is);
01986 
01987         if (diff < AV_NOSYNC_THRESHOLD) {
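                  /* Exponentially weighted average of the A-V difference:
                   * audio_diff_avg_coef = exp(log(0.01)/AUDIO_DIFF_AVG_NB), so after
                   * about AUDIO_DIFF_AVG_NB samples old measurements contribute ~1%;
                   * multiplying the accumulator by (1 - coef) normalizes the
                   * geometric series back to an average. */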
01988             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01989             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01990                 /* not enough measurements yet for a reliable estimate */
01991                 is->audio_diff_avg_count++;
01992             } else {
01993                 /* estimate the A-V difference */
01994                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01995 
01996                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
01997                     wanted_nb_samples = nb_samples + (int)(diff * is->audio_src_freq);
01998                     min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
01999                     max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
02000                     wanted_nb_samples = FFMIN(FFMAX(wanted_nb_samples, min_nb_samples), max_nb_samples);
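                          /* e.g. at 44100 Hz, a +0.05 s A-V difference with 1024 input
                           * samples would ask for 1024 + 0.05*44100 ~= 3229 samples, but
                           * the +-SAMPLE_CORRECTION_PERCENT_MAX clamp limits it to 1126. */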
02001                 }
02002                 av_dlog(NULL, "diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
02003                         diff, avg_diff, wanted_nb_samples - nb_samples,
02004                         is->audio_clock, is->video_clock, is->audio_diff_threshold);
02005             }
02006         } else {
02007             /* difference too large: probably initial PTS errors, so
02008                reset the A-V filter */
02009             is->audio_diff_avg_count = 0;
02010             is->audio_diff_cum       = 0;
02011         }
02012     }
02013 
02014     return wanted_nb_samples;
02015 }
02016 
02017 /* decode one audio frame and return its uncompressed size */
02018 static int audio_decode_frame(VideoState *is, double *pts_ptr)
02019 {
02020     AVPacket *pkt_temp = &is->audio_pkt_temp;
02021     AVPacket *pkt = &is->audio_pkt;
02022     AVCodecContext *dec = is->audio_st->codec;
02023     int len1, len2, data_size, resampled_data_size;
02024     int64_t dec_channel_layout;
02025     int got_frame;
02026     double pts;
02027     int new_packet = 0;
02028     int flush_complete = 0;
02029     int wanted_nb_samples;
02030 
02031     for (;;) {
02032         /* NOTE: the audio packet can contain several frames */
02033         while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
02034             if (!is->frame) {
02035                 if (!(is->frame = avcodec_alloc_frame()))
02036                     return AVERROR(ENOMEM);
02037             } else
02038                 avcodec_get_frame_defaults(is->frame);
02039 
02040             if (flush_complete)
02041                 break;
02042             new_packet = 0;
02043             len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
02044             if (len1 < 0) {
02045                 /* if error, we skip the frame */
02046                 pkt_temp->size = 0;
02047                 break;
02048             }
02049 
02050             pkt_temp->data += len1;
02051             pkt_temp->size -= len1;
02052 
02053             if (!got_frame) {
02054                 /* stop sending empty packets if the decoder is finished */
02055                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
02056                     flush_complete = 1;
02057                 continue;
02058             }
02059             data_size = av_samples_get_buffer_size(NULL, dec->channels,
02060                                                    is->frame->nb_samples,
02061                                                    dec->sample_fmt, 1);
02062 
02063             dec_channel_layout = (dec->channel_layout && dec->channels == av_get_channel_layout_nb_channels(dec->channel_layout)) ? dec->channel_layout : av_get_default_channel_layout(dec->channels);
02064             wanted_nb_samples = synchronize_audio(is, is->frame->nb_samples);
02065 
02066             if (dec->sample_fmt != is->audio_src_fmt ||
02067                 dec_channel_layout != is->audio_src_channel_layout ||
02068                 dec->sample_rate != is->audio_src_freq ||
02069                 (wanted_nb_samples != is->frame->nb_samples && !is->swr_ctx)) {
02070                 if (is->swr_ctx)
02071                     swr_free(&is->swr_ctx);
02072                 is->swr_ctx = swr_alloc_set_opts(NULL,
02073                                                  is->audio_tgt_channel_layout, is->audio_tgt_fmt, is->audio_tgt_freq,
02074                                                  dec_channel_layout,           dec->sample_fmt,   dec->sample_rate,
02075                                                  0, NULL);
02076                 if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
02077                     fprintf(stderr, "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
02078                         dec->sample_rate,
02079                         av_get_sample_fmt_name(dec->sample_fmt),
02080                         dec->channels,
02081                         is->audio_tgt_freq,
02082                         av_get_sample_fmt_name(is->audio_tgt_fmt),
02083                         is->audio_tgt_channels);
02084                     break;
02085                 }
02086                 is->audio_src_channel_layout = dec_channel_layout;
02087                 is->audio_src_channels = dec->channels;
02088                 is->audio_src_freq = dec->sample_rate;
02089                 is->audio_src_fmt = dec->sample_fmt;
02090             }
02091 
02092             resampled_data_size = data_size;
02093             if (is->swr_ctx) {
02094                 const uint8_t *in[] = { is->frame->data[0] };
02095                 uint8_t *out[] = {is->audio_buf2};
02096                 if (wanted_nb_samples != is->frame->nb_samples) {
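                          /* Ask the resampler to add/drop (wanted - actual) samples,
                           * expressed at the output rate, spread over the next
                           * wanted_nb_samples' worth of output. */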
02097                     if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - is->frame->nb_samples) * is->audio_tgt_freq / dec->sample_rate,
02098                                                 wanted_nb_samples * is->audio_tgt_freq / dec->sample_rate) < 0) {
02099                         fprintf(stderr, "swr_set_compensation() failed\n");
02100                         break;
02101                     }
02102                 }
02103                 len2 = swr_convert(is->swr_ctx, out, sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt),
02104                                                 in, is->frame->nb_samples);
02105                 if (len2 < 0) {
02106                     fprintf(stderr, "audio_resample() failed\n");
02107                     break;
02108                 }
02109                 if (len2 == sizeof(is->audio_buf2) / is->audio_tgt_channels / av_get_bytes_per_sample(is->audio_tgt_fmt)) {
02110                     fprintf(stderr, "warning: audio buffer is probably too small\n");
02111                     swr_init(is->swr_ctx);
02112                 }
02113                 is->audio_buf = is->audio_buf2;
02114                 resampled_data_size = len2 * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
02115             } else {
02116                 is->audio_buf = is->frame->data[0];
02117             }
02118 
02119             /* derive the pts from the running audio clock (seeded from the packet pts) */
02120             pts = is->audio_clock;
02121             *pts_ptr = pts;
02122             is->audio_clock += (double)data_size / (dec->channels * dec->sample_rate * av_get_bytes_per_sample(dec->sample_fmt));
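                  /* e.g. 1024 samples of stereo S16 at 44100 Hz give data_size = 4096
                   * bytes, advancing the clock by 4096 / (2 * 44100 * 2) ~= 23.2 ms,
                   * i.e. exactly the duration of the decoded frame. */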
02123 #ifdef DEBUG
02124             {
02125                 static double last_clock;
02126                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
02127                        is->audio_clock - last_clock,
02128                        is->audio_clock, pts);
02129                 last_clock = is->audio_clock;
02130             }
02131 #endif
02132             return resampled_data_size;
02133         }
02134 
02135         /* free the current packet */
02136         if (pkt->data)
02137             av_free_packet(pkt);
02138         memset(pkt_temp, 0, sizeof(*pkt_temp));
02139 
02140         if (is->paused || is->audioq.abort_request) {
02141             return -1;
02142         }
02143 
02144         /* read next packet */
02145         if ((new_packet = packet_queue_get(&is->audioq, pkt, 1)) < 0)
02146             return -1;
02147 
02148         if (pkt->data == flush_pkt.data)
02149             avcodec_flush_buffers(dec);
02150 
02151         *pkt_temp = *pkt;
02152 
02153         /* update the audio clock with the packet pts, if available */
02154         if (pkt->pts != AV_NOPTS_VALUE) {
02155             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
02156         }
02157     }
02158 }
02159 
02160 /* prepare a new audio buffer */
02161 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02162 {
02163     VideoState *is = opaque;
02164     int audio_size, len1;
02165     int bytes_per_sec;
02166     int frame_size = av_samples_get_buffer_size(NULL, is->audio_tgt_channels, 1, is->audio_tgt_fmt, 1);
02167     double pts;
02168 
02169     audio_callback_time = av_gettime();
02170 
02171     while (len > 0) {
02172         if (is->audio_buf_index >= is->audio_buf_size) {
02173            audio_size = audio_decode_frame(is, &pts);
02174            if (audio_size < 0) {
02175                 /* if error, just output silence */
02176                is->audio_buf      = is->silence_buf;
02177                is->audio_buf_size = sizeof(is->silence_buf) / frame_size * frame_size;
02178            } else {
02179                if (is->show_mode != SHOW_MODE_VIDEO)
02180                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02181                is->audio_buf_size = audio_size;
02182            }
02183            is->audio_buf_index = 0;
02184         }
02185         len1 = is->audio_buf_size - is->audio_buf_index;
02186         if (len1 > len)
02187             len1 = len;
02188         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02189         len -= len1;
02190         stream += len1;
02191         is->audio_buf_index += len1;
02192     }
02193     bytes_per_sec = is->audio_tgt_freq * is->audio_tgt_channels * av_get_bytes_per_sample(is->audio_tgt_fmt);
02194     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
02195     /* Let's assume the audio driver that is used by SDL has two periods. */
02196     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
02197     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
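          /* audio_current_pts estimates what is being heard right now: the decoded
           * clock minus the data still queued (two hardware periods plus the unread
           * part of audio_buf); the drift pins that estimate to the wall-clock time of
           * this callback so it can later be extrapolated from the system clock. */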
02198 }
02199 
02200 /* open a given stream. Return 0 if OK */
02201 static int stream_component_open(VideoState *is, int stream_index)
02202 {
02203     AVFormatContext *ic = is->ic;
02204     AVCodecContext *avctx;
02205     AVCodec *codec;
02206     SDL_AudioSpec wanted_spec, spec;
02207     AVDictionary *opts;
02208     AVDictionaryEntry *t = NULL;
02209     int64_t wanted_channel_layout = 0;
02210     int wanted_nb_channels;
02211     const char *env;
02212 
02213     if (stream_index < 0 || stream_index >= ic->nb_streams)
02214         return -1;
02215     avctx = ic->streams[stream_index]->codec;
02216 
02217     codec = avcodec_find_decoder(avctx->codec_id);
02218     opts = filter_codec_opts(codec_opts, codec, ic, ic->streams[stream_index]);
02219 
02220     switch(avctx->codec_type){
02221         case AVMEDIA_TYPE_AUDIO   : if(audio_codec_name   ) codec= avcodec_find_decoder_by_name(   audio_codec_name); break;
02222         case AVMEDIA_TYPE_SUBTITLE: if(subtitle_codec_name) codec= avcodec_find_decoder_by_name(subtitle_codec_name); break;
02223         case AVMEDIA_TYPE_VIDEO   : if(video_codec_name   ) codec= avcodec_find_decoder_by_name(   video_codec_name); break;
02224     }
02225     if (!codec)
02226         return -1;
02227 
02228     avctx->workaround_bugs   = workaround_bugs;
02229     avctx->lowres            = lowres;
02230     if(avctx->lowres > codec->max_lowres){
02231         av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
02232                 codec->max_lowres);
02233         avctx->lowres= codec->max_lowres;
02234     }
02235     avctx->idct_algo         = idct;
02236     avctx->skip_frame        = skip_frame;
02237     avctx->skip_idct         = skip_idct;
02238     avctx->skip_loop_filter  = skip_loop_filter;
02239     avctx->error_concealment = error_concealment;
02240 
02241     if(avctx->lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
02242     if (fast)   avctx->flags2 |= CODEC_FLAG2_FAST;
02243     if(codec->capabilities & CODEC_CAP_DR1)
02244         avctx->flags |= CODEC_FLAG_EMU_EDGE;
02245 
02246     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02247         env = SDL_getenv("SDL_AUDIO_CHANNELS");
02248         if (env)
02249             wanted_channel_layout = av_get_default_channel_layout(SDL_atoi(env));
02250         if (!wanted_channel_layout) {
02251             wanted_channel_layout = (avctx->channel_layout && avctx->channels == av_get_channel_layout_nb_channels(avctx->channel_layout)) ? avctx->channel_layout : av_get_default_channel_layout(avctx->channels);
02252             wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
02253             wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
02254             /* SDL only supports 1, 2, 4 or 6 channels at the moment, so we have to make sure not to request anything else. */
02255             while (wanted_nb_channels > 0 && (wanted_nb_channels == 3 || wanted_nb_channels == 5 || wanted_nb_channels > 6)) {
02256                 wanted_nb_channels--;
02257                 wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
02258             }
02259         }
02260         wanted_spec.channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
02261         wanted_spec.freq = avctx->sample_rate;
02262         if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
02263             fprintf(stderr, "Invalid sample rate or channel count!\n");
02264             return -1;
02265         }
02266     }
02267 
02268     if (!av_dict_get(opts, "threads", NULL, 0))
02269         av_dict_set(&opts, "threads", "auto", 0);
02270     if (!codec ||
02271         avcodec_open2(avctx, codec, &opts) < 0)
02272         return -1;
02273     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
02274         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
02275         return AVERROR_OPTION_NOT_FOUND;
02276     }
02277 
02278     /* prepare audio output */
02279     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02280         wanted_spec.format = AUDIO_S16SYS;
02281         wanted_spec.silence = 0;
02282         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
02283         wanted_spec.callback = sdl_audio_callback;
02284         wanted_spec.userdata = is;
02285         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
02286             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
02287             return -1;
02288         }
02289         is->audio_hw_buf_size = spec.size;
02290         if (spec.format != AUDIO_S16SYS) {
02291             fprintf(stderr, "SDL advised audio format %d is not supported!\n", spec.format);
02292             return -1;
02293         }
02294         if (spec.channels != wanted_spec.channels) {
02295             wanted_channel_layout = av_get_default_channel_layout(spec.channels);
02296             if (!wanted_channel_layout) {
02297                 fprintf(stderr, "SDL advised channel count %d is not supported!\n", spec.channels);
02298                 return -1;
02299             }
02300         }
02301         is->audio_src_fmt = is->audio_tgt_fmt = AV_SAMPLE_FMT_S16;
02302         is->audio_src_freq = is->audio_tgt_freq = spec.freq;
02303         is->audio_src_channel_layout = is->audio_tgt_channel_layout = wanted_channel_layout;
02304         is->audio_src_channels = is->audio_tgt_channels = spec.channels;
02305     }
02306 
02307     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
02308     switch (avctx->codec_type) {
02309     case AVMEDIA_TYPE_AUDIO:
02310         is->audio_stream = stream_index;
02311         is->audio_st = ic->streams[stream_index];
02312         is->audio_buf_size  = 0;
02313         is->audio_buf_index = 0;
02314 
02315         /* init averaging filter */
02316         is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
02317         is->audio_diff_avg_count = 0;
02318         /* since we do not have a precise enough measure of the audio FIFO fullness,
02319            we correct audio sync only if the error is larger than this threshold */
02320         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / wanted_spec.freq;
02321 
02322         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
02323         packet_queue_init(&is->audioq);
02324         SDL_PauseAudio(0);
02325         break;
02326     case AVMEDIA_TYPE_VIDEO:
02327         is->video_stream = stream_index;
02328         is->video_st = ic->streams[stream_index];
02329 
02330         packet_queue_init(&is->videoq);
02331         is->video_tid = SDL_CreateThread(video_thread, is);
02332         break;
02333     case AVMEDIA_TYPE_SUBTITLE:
02334         is->subtitle_stream = stream_index;
02335         is->subtitle_st = ic->streams[stream_index];
02336         packet_queue_init(&is->subtitleq);
02337 
02338         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
02339         break;
02340     default:
02341         break;
02342     }
02343     return 0;
02344 }
02345 
02346 static void stream_component_close(VideoState *is, int stream_index)
02347 {
02348     AVFormatContext *ic = is->ic;
02349     AVCodecContext *avctx;
02350 
02351     if (stream_index < 0 || stream_index >= ic->nb_streams)
02352         return;
02353     avctx = ic->streams[stream_index]->codec;
02354 
02355     switch (avctx->codec_type) {
02356     case AVMEDIA_TYPE_AUDIO:
02357         packet_queue_abort(&is->audioq);
02358 
02359         SDL_CloseAudio();
02360 
02361         packet_queue_end(&is->audioq);
02362         if (is->swr_ctx)
02363             swr_free(&is->swr_ctx);
02364         av_free_packet(&is->audio_pkt);
02365         av_freep(&is->audio_buf1);
02366         is->audio_buf = NULL;
02367         av_freep(&is->frame);
02368 
02369         if (is->rdft) {
02370             av_rdft_end(is->rdft);
02371             av_freep(&is->rdft_data);
02372             is->rdft = NULL;
02373             is->rdft_bits = 0;
02374         }
02375         break;
02376     case AVMEDIA_TYPE_VIDEO:
02377         packet_queue_abort(&is->videoq);
02378 
02379         /* note: we also signal this mutex to make sure we unblock the
02380            video thread in all cases */
02381         SDL_LockMutex(is->pictq_mutex);
02382         SDL_CondSignal(is->pictq_cond);
02383         SDL_UnlockMutex(is->pictq_mutex);
02384 
02385         SDL_WaitThread(is->video_tid, NULL);
02386 
02387         packet_queue_end(&is->videoq);
02388         break;
02389     case AVMEDIA_TYPE_SUBTITLE:
02390         packet_queue_abort(&is->subtitleq);
02391 
02392         /* note: we also signal this mutex to make sure we unblock the
02393            subtitle thread in all cases */
02394         SDL_LockMutex(is->subpq_mutex);
02395         is->subtitle_stream_changed = 1;
02396 
02397         SDL_CondSignal(is->subpq_cond);
02398         SDL_UnlockMutex(is->subpq_mutex);
02399 
02400         SDL_WaitThread(is->subtitle_tid, NULL);
02401 
02402         packet_queue_end(&is->subtitleq);
02403         break;
02404     default:
02405         break;
02406     }
02407 
02408     ic->streams[stream_index]->discard = AVDISCARD_ALL;
02409     avcodec_close(avctx);
02410     switch (avctx->codec_type) {
02411     case AVMEDIA_TYPE_AUDIO:
02412         is->audio_st = NULL;
02413         is->audio_stream = -1;
02414         break;
02415     case AVMEDIA_TYPE_VIDEO:
02416         is->video_st = NULL;
02417         is->video_stream = -1;
02418         break;
02419     case AVMEDIA_TYPE_SUBTITLE:
02420         is->subtitle_st = NULL;
02421         is->subtitle_stream = -1;
02422         break;
02423     default:
02424         break;
02425     }
02426 }
02427 
02428 static int decode_interrupt_cb(void *ctx)
02429 {
02430     VideoState *is = ctx;
02431     return is->abort_request;
02432 }
02433 
02434 /* this thread gets the stream from the disk or the network */
02435 static int read_thread(void *arg)
02436 {
02437     VideoState *is = arg;
02438     AVFormatContext *ic = NULL;
02439     int err, i, ret;
02440     int st_index[AVMEDIA_TYPE_NB];
02441     AVPacket pkt1, *pkt = &pkt1;
02442     int eof = 0;
02443     int pkt_in_play_range = 0;
02444     AVDictionaryEntry *t;
02445     AVDictionary **opts;
02446     int orig_nb_streams;
02447 
02448     memset(st_index, -1, sizeof(st_index));
02449     is->video_stream = -1;
02450     is->audio_stream = -1;
02451     is->subtitle_stream = -1;
02452 
02453     ic = avformat_alloc_context();
02454     ic->interrupt_callback.callback = decode_interrupt_cb;
02455     ic->interrupt_callback.opaque = is;
02456     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
02457     if (err < 0) {
02458         print_error(is->filename, err);
02459         ret = -1;
02460         goto fail;
02461     }
02462     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
02463         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
02464         ret = AVERROR_OPTION_NOT_FOUND;
02465         goto fail;
02466     }
02467     is->ic = ic;
02468 
02469     if (genpts)
02470         ic->flags |= AVFMT_FLAG_GENPTS;
02471 
02472     opts = setup_find_stream_info_opts(ic, codec_opts);
02473     orig_nb_streams = ic->nb_streams;
02474 
02475     err = avformat_find_stream_info(ic, opts);
02476     if (err < 0) {
02477         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02478         ret = -1;
02479         goto fail;
02480     }
02481     for (i = 0; i < orig_nb_streams; i++)
02482         av_dict_free(&opts[i]);
02483     av_freep(&opts);
02484 
02485     if (ic->pb)
02486         ic->pb->eof_reached = 0; // FIXME hack: ffplay should perhaps not use url_feof() to test for the end
02487 
02488     if (seek_by_bytes < 0)
02489         seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT);
02490 
02491     /* if seeking was requested, execute it */
02492     if (start_time != AV_NOPTS_VALUE) {
02493         int64_t timestamp;
02494 
02495         timestamp = start_time;
02496         /* add the stream start time */
02497         if (ic->start_time != AV_NOPTS_VALUE)
02498             timestamp += ic->start_time;
02499         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02500         if (ret < 0) {
02501             fprintf(stderr, "%s: could not seek to position %0.3f\n",
02502                     is->filename, (double)timestamp / AV_TIME_BASE);
02503         }
02504     }
02505 
02506     for (i = 0; i < ic->nb_streams; i++)
02507         ic->streams[i]->discard = AVDISCARD_ALL;
02508     if (!video_disable)
02509         st_index[AVMEDIA_TYPE_VIDEO] =
02510             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
02511                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
02512     if (!audio_disable)
02513         st_index[AVMEDIA_TYPE_AUDIO] =
02514             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
02515                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
02516                                 st_index[AVMEDIA_TYPE_VIDEO],
02517                                 NULL, 0);
02518     if (!video_disable)
02519         st_index[AVMEDIA_TYPE_SUBTITLE] =
02520             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
02521                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
02522                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
02523                                  st_index[AVMEDIA_TYPE_AUDIO] :
02524                                  st_index[AVMEDIA_TYPE_VIDEO]),
02525                                 NULL, 0);
02526     if (show_status) {
02527         av_dump_format(ic, 0, is->filename, 0);
02528     }
02529 
02530     is->show_mode = show_mode;
02531 
02532     /* open the streams */
02533     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02534         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02535     }
02536 
02537     ret = -1;
02538     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02539         ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02540     }
02541     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02542     if (is->show_mode == SHOW_MODE_NONE)
02543         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
02544 
02545     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02546         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02547     }
02548 
02549     if (is->video_stream < 0 && is->audio_stream < 0) {
02550         fprintf(stderr, "%s: could not open codecs\n", is->filename);
02551         ret = -1;
02552         goto fail;
02553     }
02554 
02555     for (;;) {
02556         if (is->abort_request)
02557             break;
02558         if (is->paused != is->last_paused) {
02559             is->last_paused = is->paused;
02560             if (is->paused)
02561                 is->read_pause_return = av_read_pause(ic);
02562             else
02563                 av_read_play(ic);
02564         }
02565 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
02566         if (is->paused &&
02567                 (!strcmp(ic->iformat->name, "rtsp") ||
02568                  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
02569             /* wait 10 ms to avoid trying to get another packet */
02570             /* XXX: horrible */
02571             SDL_Delay(10);
02572             continue;
02573         }
02574 #endif
02575         if (is->seek_req) {
02576             int64_t seek_target = is->seek_pos;
02577             int64_t seek_min    = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02578             int64_t seek_max    = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
02579 // FIXME the +-2 is due to rounding not being done in the correct direction when
02580 //       generating the seek_pos/seek_rel variables
02581 
02582             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02583             if (ret < 0) {
02584                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02585             } else {
02586                 if (is->audio_stream >= 0) {
02587                     packet_queue_flush(&is->audioq);
02588                     packet_queue_put(&is->audioq, &flush_pkt);
02589                 }
02590                 if (is->subtitle_stream >= 0) {
02591                     packet_queue_flush(&is->subtitleq);
02592                     packet_queue_put(&is->subtitleq, &flush_pkt);
02593                 }
02594                 if (is->video_stream >= 0) {
02595                     packet_queue_flush(&is->videoq);
02596                     packet_queue_put(&is->videoq, &flush_pkt);
02597                 }
02598             }
02599             is->seek_req = 0;
02600             eof = 0;
02601         }
02602 
02603         /* if the queues are full, no need to read more */
02604         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02605             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream < 0)
02606                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream < 0)
02607                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream < 0))) {
02608             /* wait 10 ms */
02609             SDL_Delay(10);
02610             continue;
02611         }
02612         if (eof) {
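                  /* At EOF, queue empty (NULL data) packets so decoders that buffer
                   * frames internally (CODEC_CAP_DELAY) are drained and output their
                   * remaining frames before we loop or exit. */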
02613             if (is->video_stream >= 0) {
02614                 av_init_packet(pkt);
02615                 pkt->data = NULL;
02616                 pkt->size = 0;
02617                 pkt->stream_index = is->video_stream;
02618                 packet_queue_put(&is->videoq, pkt);
02619             }
02620             if (is->audio_stream >= 0 &&
02621                 is->audio_st->codec->codec->capabilities & CODEC_CAP_DELAY) {
02622                 av_init_packet(pkt);
02623                 pkt->data = NULL;
02624                 pkt->size = 0;
02625                 pkt->stream_index = is->audio_stream;
02626                 packet_queue_put(&is->audioq, pkt);
02627             }
02628             SDL_Delay(10);
02629             if (is->audioq.size + is->videoq.size + is->subtitleq.size == 0) {
02630                 if (loop != 1 && (!loop || --loop)) {
02631                     stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02632                 } else if (autoexit) {
02633                     ret = AVERROR_EOF;
02634                     goto fail;
02635                 }
02636             }
02637             eof=0;
02638             continue;
02639         }
02640         ret = av_read_frame(ic, pkt);
02641         if (ret < 0) {
02642             if (ret == AVERROR_EOF || url_feof(ic->pb))
02643                 eof = 1;
02644             if (ic->pb && ic->pb->error)
02645                 break;
02646             SDL_Delay(100); /* wait for user event */
02647             continue;
02648         }
02649         /* check if packet is in play range specified by user, then queue, otherwise discard */
02650         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02651                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02652                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02653                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
02654                 <= ((double)duration / 1000000);
02655         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02656             packet_queue_put(&is->audioq, pkt);
02657         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02658             packet_queue_put(&is->videoq, pkt);
02659         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02660             packet_queue_put(&is->subtitleq, pkt);
02661         } else {
02662             av_free_packet(pkt);
02663         }
02664     }
02665     /* wait until the end */
02666     while (!is->abort_request) {
02667         SDL_Delay(100);
02668     }
02669 
02670     ret = 0;
02671  fail:
02672     /* close each stream */
02673     if (is->audio_stream >= 0)
02674         stream_component_close(is, is->audio_stream);
02675     if (is->video_stream >= 0)
02676         stream_component_close(is, is->video_stream);
02677     if (is->subtitle_stream >= 0)
02678         stream_component_close(is, is->subtitle_stream);
02679     if (is->ic) {
02680         avformat_close_input(&is->ic);
02681     }
02682 
02683     if (ret != 0) {
02684         SDL_Event event;
02685 
02686         event.type = FF_QUIT_EVENT;
02687         event.user.data1 = is;
02688         SDL_PushEvent(&event);
02689     }
02690     return 0;
02691 }
02692 
02693 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02694 {
02695     VideoState *is;
02696 
02697     is = av_mallocz(sizeof(VideoState));
02698     if (!is)
02699         return NULL;
02700     av_strlcpy(is->filename, filename, sizeof(is->filename));
02701     is->iformat = iformat;
02702     is->ytop    = 0;
02703     is->xleft   = 0;
02704 
02705     /* start video display */
02706     is->pictq_mutex = SDL_CreateMutex();
02707     is->pictq_cond  = SDL_CreateCond();
02708 
02709     is->subpq_mutex = SDL_CreateMutex();
02710     is->subpq_cond  = SDL_CreateCond();
02711 
02712     is->av_sync_type = av_sync_type;
02713     is->read_tid     = SDL_CreateThread(read_thread, is);
02714     if (!is->read_tid) {
02715         av_free(is);
02716         return NULL;
02717     }
02718     return is;
02719 }
02720 
02721 static void stream_cycle_channel(VideoState *is, int codec_type)
02722 {
02723     AVFormatContext *ic = is->ic;
02724     int start_index, stream_index;
02725     AVStream *st;
02726 
02727     if (codec_type == AVMEDIA_TYPE_VIDEO)
02728         start_index = is->video_stream;
02729     else if (codec_type == AVMEDIA_TYPE_AUDIO)
02730         start_index = is->audio_stream;
02731     else
02732         start_index = is->subtitle_stream;
02733     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02734         return;
02735     stream_index = start_index;
02736     for (;;) {
02737         if (++stream_index >= is->ic->nb_streams)
02738         {
02739             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02740             {
02741                 stream_index = -1;
02742                 goto the_end;
02743             } else
02744                 stream_index = 0;
02745         }
02746         if (stream_index == start_index)
02747             return;
02748         st = ic->streams[stream_index];
02749         if (st->codec->codec_type == codec_type) {
02750             /* check that parameters are OK */
02751             switch (codec_type) {
02752             case AVMEDIA_TYPE_AUDIO:
02753                 if (st->codec->sample_rate != 0 &&
02754                     st->codec->channels != 0)
02755                     goto the_end;
02756                 break;
02757             case AVMEDIA_TYPE_VIDEO:
02758             case AVMEDIA_TYPE_SUBTITLE:
02759                 goto the_end;
02760             default:
02761                 break;
02762             }
02763         }
02764     }
02765  the_end:
02766     stream_component_close(is, start_index);
02767     stream_component_open(is, stream_index);
02768 }
02769 
02770 
02771 static void toggle_full_screen(VideoState *is)
02772 {
02773     av_unused int i;
02774     is_full_screen = !is_full_screen;
02775 #if defined(__APPLE__) && SDL_VERSION_ATLEAST(1, 2, 14)
02776     /* OS X needs to reallocate the SDL overlays */
02777     for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
02778         is->pictq[i].reallocate = 1;
02779     }
02780 #endif
02781     video_open(is, 1);
02782 }
02783 
02784 static void toggle_pause(VideoState *is)
02785 {
02786     stream_toggle_pause(is);
02787     is->step = 0;
02788 }
02789 
02790 static void step_to_next_frame(VideoState *is)
02791 {
02792     /* if the stream is paused, unpause it, then step */
02793     if (is->paused)
02794         stream_toggle_pause(is);
02795     is->step = 1;
02796 }
02797 
02798 static void toggle_audio_display(VideoState *is)
02799 {
02800     int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02801     is->show_mode = (is->show_mode + 1) % SHOW_MODE_NB;
02802     fill_rectangle(screen,
02803                 is->xleft, is->ytop, is->width, is->height,
02804                 bgcolor);
02805     SDL_UpdateRect(screen, is->xleft, is->ytop, is->width, is->height);
02806 }
02807 
02808 /* handle an event sent by the GUI */
02809 static void event_loop(VideoState *cur_stream)
02810 {
02811     SDL_Event event;
02812     double incr, pos, frac;
02813 
02814     for (;;) {
02815         double x;
02816         SDL_WaitEvent(&event);
02817         switch (event.type) {
02818         case SDL_KEYDOWN:
02819             if (exit_on_keydown) {
02820                 do_exit(cur_stream);
02821                 break;
02822             }
02823             switch (event.key.keysym.sym) {
02824             case SDLK_ESCAPE:
02825             case SDLK_q:
02826                 do_exit(cur_stream);
02827                 break;
02828             case SDLK_f:
02829                 toggle_full_screen(cur_stream);
02830                 break;
02831             case SDLK_p:
02832             case SDLK_SPACE:
02833                 toggle_pause(cur_stream);
02834                 break;
02835             case SDLK_s: // S: Step to next frame
02836                 step_to_next_frame(cur_stream);
02837                 break;
02838             case SDLK_a:
02839                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
02840                 break;
02841             case SDLK_v:
02842                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
02843                 break;
02844             case SDLK_t:
02845                 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
02846                 break;
02847             case SDLK_w:
02848                 toggle_audio_display(cur_stream);
02849                 break;
02850             case SDLK_PAGEUP:
02851                 incr = 600.0;
02852                 goto do_seek;
02853             case SDLK_PAGEDOWN:
02854                 incr = -600.0;
02855                 goto do_seek;
02856             case SDLK_LEFT:
02857                 incr = -10.0;
02858                 goto do_seek;
02859             case SDLK_RIGHT:
02860                 incr = 10.0;
02861                 goto do_seek;
02862             case SDLK_UP:
02863                 incr = 60.0;
02864                 goto do_seek;
02865             case SDLK_DOWN:
02866                 incr = -60.0;
02867             do_seek:
02868                     if (seek_by_bytes) {
02869                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos >= 0) {
02870                             pos = cur_stream->video_current_pos;
02871                         } else if (cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos >= 0) {
02872                             pos = cur_stream->audio_pkt.pos;
02873                         } else
02874                             pos = avio_tell(cur_stream->ic->pb);
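                              /* Convert the requested seek from seconds to bytes using the
                               * container bit rate, or assume ~1.44 Mbit/s (180000 bytes/s)
                               * when the bit rate is unknown. */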
02875                         if (cur_stream->ic->bit_rate)
02876                             incr *= cur_stream->ic->bit_rate / 8.0;
02877                         else
02878                             incr *= 180000.0;
02879                         pos += incr;
02880                         stream_seek(cur_stream, pos, incr, 1);
02881                     } else {
02882                         pos = get_master_clock(cur_stream);
02883                         pos += incr;
02884                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
02885                     }
02886                 break;
02887             default:
02888                 break;
02889             }
02890             break;
02891         case SDL_MOUSEBUTTONDOWN:
02892             if (exit_on_mousedown) {
02893                 do_exit(cur_stream);
02894                 break;
02895             }
02896         case SDL_MOUSEMOTION:
02897             if (event.type == SDL_MOUSEBUTTONDOWN) {
02898                 x = event.button.x;
02899             } else {
02900                 if (event.motion.state != SDL_PRESSED)
02901                     break;
02902                 x = event.motion.x;
02903             }
02904                 if (seek_by_bytes || cur_stream->ic->duration <= 0) {
02905                     uint64_t size =  avio_size(cur_stream->ic->pb);
02906                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
02907                 } else {
02908                     int64_t ts;
02909                     int ns, hh, mm, ss;
02910                     int tns, thh, tmm, tss;
02911                     tns  = cur_stream->ic->duration / 1000000LL;
02912                     thh  = tns / 3600;
02913                     tmm  = (tns % 3600) / 60;
02914                     tss  = (tns % 60);
02915                     frac = x / cur_stream->width;
02916                     ns   = frac * tns;
02917                     hh   = ns / 3600;
02918                     mm   = (ns % 3600) / 60;
02919                     ss   = (ns % 60);
02920                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
02921                             hh, mm, ss, thh, tmm, tss);
02922                     ts = frac * cur_stream->ic->duration;
02923                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
02924                         ts += cur_stream->ic->start_time;
02925                     stream_seek(cur_stream, ts, 0, 0);
02926                 }
02927             break;
02928         case SDL_VIDEORESIZE:
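                /* Recreate the output surface at the new size and remember the
                 * dimensions so later video_open() calls keep them. */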
02929                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02930                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02931                 screen_width  = cur_stream->width  = event.resize.w;
02932                 screen_height = cur_stream->height = event.resize.h;
02933             break;
02934         case SDL_QUIT:
02935         case FF_QUIT_EVENT:
02936             do_exit(cur_stream);
02937             break;
02938         case FF_ALLOC_EVENT:
02939             video_open(event.user.data1, 0);
02940             alloc_picture(event.user.data1);
02941             break;
02942         case FF_REFRESH_EVENT:
02943             video_refresh(event.user.data1);
02944             cur_stream->refresh = 0;
02945             break;
02946         default:
02947             break;
02948         }
02949     }
02950 }
02951 
02952 static int opt_frame_size(const char *opt, const char *arg)
02953 {
02954     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
02955     return opt_default("video_size", arg);
02956 }
02957 
02958 static int opt_width(const char *opt, const char *arg)
02959 {
02960     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02961     return 0;
02962 }
02963 
02964 static int opt_height(const char *opt, const char *arg)
02965 {
02966     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02967     return 0;
02968 }
02969 
02970 static int opt_format(const char *opt, const char *arg)
02971 {
02972     file_iformat = av_find_input_format(arg);
02973     if (!file_iformat) {
02974         fprintf(stderr, "Unknown input format: %s\n", arg);
02975         return AVERROR(EINVAL);
02976     }
02977     return 0;
02978 }
02979 
02980 static int opt_frame_pix_fmt(const char *opt, const char *arg)
02981 {
02982     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
02983     return opt_default("pixel_format", arg);
02984 }
02985 
02986 static int opt_sync(const char *opt, const char *arg)
02987 {
02988     if (!strcmp(arg, "audio"))
02989         av_sync_type = AV_SYNC_AUDIO_MASTER;
02990     else if (!strcmp(arg, "video"))
02991         av_sync_type = AV_SYNC_VIDEO_MASTER;
02992     else if (!strcmp(arg, "ext"))
02993         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02994     else {
02995         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02996         exit(1);
02997     }
02998     return 0;
02999 }
03000 
03001 static int opt_seek(const char *opt, const char *arg)
03002 {
03003     start_time = parse_time_or_die(opt, arg, 1);
03004     return 0;
03005 }
03006 
03007 static int opt_duration(const char *opt, const char *arg)
03008 {
03009     duration = parse_time_or_die(opt, arg, 1);
03010     return 0;
03011 }
03012 
03013 static int opt_show_mode(const char *opt, const char *arg)
03014 {
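    /* accept a symbolic mode name or a numeric index in [0, SHOW_MODE_NB) */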
03015     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
03016                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
03017                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
03018                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
03019     return 0;
03020 }
03021 
03022 static void opt_input_file(void *optctx, const char *filename)
03023 {
03024     if (input_filename) {
03025         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03026                 filename, input_filename);
03027         exit_program(1);
03028     }
03029     if (!strcmp(filename, "-"))
03030         filename = "pipe:";
03031     input_filename = filename;
03032 }
03033 
03034 static int opt_codec(void *o, const char *opt, const char *arg)
03035 {
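    /* The last character of the option name as given on the command line
     * (e.g. "codec:a", "codec:v", "codec:s") selects which decoder name to set. */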
03036     switch(opt[strlen(opt)-1]){
03037     case 'a' :    audio_codec_name = arg; break;
03038     case 's' : subtitle_codec_name = arg; break;
03039     case 'v' :    video_codec_name = arg; break;
03040     }
03041     return 0;
03042 }
03043 
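/* dummy storage for the boolean "-i" option, which is accepted but otherwise ignored */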
03044 static int dummy;
03045 
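/* Command line option table: each entry lists the option name, its flags,
 * a value pointer or handler callback, the help text and the argument name.
 * Options not listed here are routed to the generic "default" catch-all,
 * which tries them as AVOptions via opt_default(). */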
03046 static const OptionDef options[] = {
03047 #include "cmdutils_common_opts.h"
03048     { "x", HAS_ARG, { (void*)opt_width }, "force displayed width", "width" },
03049     { "y", HAS_ARG, { (void*)opt_height }, "force displayed height", "height" },
03050     { "s", HAS_ARG | OPT_VIDEO, { (void*)opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
03051     { "fs", OPT_BOOL, { (void*)&is_full_screen }, "force full screen" },
03052     { "an", OPT_BOOL, { (void*)&audio_disable }, "disable audio" },
03053     { "vn", OPT_BOOL, { (void*)&video_disable }, "disable video" },
03054     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_number" },
03055     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_number" },
03056     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_number" },
03057     { "ss", HAS_ARG, { (void*)&opt_seek }, "seek to a given position in seconds", "pos" },
03058     { "t", HAS_ARG, { (void*)&opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
03059     { "bytes", OPT_INT | HAS_ARG, { (void*)&seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
03060     { "nodisp", OPT_BOOL, { (void*)&display_disable }, "disable graphical display" },
03061     { "f", HAS_ARG, { (void*)opt_format }, "force format", "fmt" },
03062     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { (void*)opt_frame_pix_fmt }, "set pixel format", "format" },
03063     { "stats", OPT_BOOL | OPT_EXPERT, { (void*)&show_status }, "show status", "" },
03064     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&workaround_bugs }, "workaround bugs", "" },
03065     { "fast", OPT_BOOL | OPT_EXPERT, { (void*)&fast }, "non spec compliant optimizations", "" },
03066     { "genpts", OPT_BOOL | OPT_EXPERT, { (void*)&genpts }, "generate pts", "" },
03067     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
03068     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&lowres }, "", "" },
03069     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_loop_filter }, "", "" },
03070     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_frame }, "", "" },
03071     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&skip_idct }, "", "" },
03072     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&idct }, "set idct algo",  "algo" },
03073     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&error_concealment }, "set error concealment options",  "bit_mask" },
03074     { "sync", HAS_ARG | OPT_EXPERT, { (void*)opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
03075     { "autoexit", OPT_BOOL | OPT_EXPERT, { (void*)&autoexit }, "exit at the end", "" },
03076     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_keydown }, "exit on key down", "" },
03077     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { (void*)&exit_on_mousedown }, "exit on mouse down", "" },
03078     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { (void*)&loop }, "set number of times the playback shall be looped", "loop count" },
03079     { "framedrop", OPT_BOOL | OPT_EXPERT, { (void*)&framedrop }, "drop frames when cpu is too slow", "" },
03080     { "window_title", OPT_STRING | HAS_ARG, { (void*)&window_title }, "set window title", "window title" },
03081 #if CONFIG_AVFILTER
03082     { "vf", OPT_STRING | HAS_ARG, { (void*)&vfilters }, "video filters", "filter list" },
03083 #endif
03084     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { (void*)&rdftspeed }, "rdft speed", "msecs" },
03085     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
03086     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { (void*)opt_default }, "generic catch all option", "" },
03087     { "i", OPT_BOOL, {(void *)&dummy}, "read specified file", "input_file"},
03088     { "codec", HAS_ARG | OPT_FUNC2, {(void*)opt_codec}, "force decoder", "decoder" },
03089     { NULL, },
03090 };
03091 
03092 static void show_usage(void)
03093 {
03094     av_log(NULL, AV_LOG_INFO, "Simple media player\n");
03095     av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
03096     av_log(NULL, AV_LOG_INFO, "\n");
03097 }
03098 
03099 static int opt_help(const char *opt, const char *arg)
03100 {
03101     av_log_set_callback(log_callback_help);
03102     show_usage();
03103     show_help_options(options, "Main options:\n",
03104                       OPT_EXPERT, 0);
03105     show_help_options(options, "\nAdvanced options:\n",
03106                       OPT_EXPERT, OPT_EXPERT);
03107     printf("\n");
03108     show_help_children(avcodec_get_class(), AV_OPT_FLAG_DECODING_PARAM);
03109     show_help_children(avformat_get_class(), AV_OPT_FLAG_DECODING_PARAM);
03110 #if !CONFIG_AVFILTER
03111     show_help_children(sws_get_class(), AV_OPT_FLAG_ENCODING_PARAM);
03112 #endif
03113     printf("\nWhile playing:\n"
03114            "q, ESC              quit\n"
03115            "f                   toggle full screen\n"
03116            "p, SPC              pause\n"
03117            "a                   cycle audio channel\n"
03118            "v                   cycle video channel\n"
03119            "t                   cycle subtitle channel\n"
03120            "w                   show audio waves\n"
03121            "s                   activate frame-step mode\n"
03122            "left/right          seek backward/forward 10 seconds\n"
03123            "down/up             seek backward/forward 1 minute\n"
03124            "page down/page up   seek backward/forward 10 minutes\n"
03125            "mouse click         seek to percentage in file corresponding to fraction of width\n"
03126            );
03127     return 0;
03128 }
03129 
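/* Lock manager callback for av_lockmgr_register(): libavcodec uses it to
 * create, obtain, release and destroy the mutexes protecting its
 * non-thread-safe internals. SDL mutexes back the implementation; the
 * callback must return 0 on success and nonzero on failure. */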
03130 static int lockmgr(void **mtx, enum AVLockOp op)
03131 {
03132    switch(op) {
03133       case AV_LOCK_CREATE:
03134           *mtx = SDL_CreateMutex();
03135           if(!*mtx)
03136               return 1;
03137           return 0;
03138       case AV_LOCK_OBTAIN:
03139           return !!SDL_LockMutex(*mtx);
03140       case AV_LOCK_RELEASE:
03141           return !!SDL_UnlockMutex(*mtx);
03142       case AV_LOCK_DESTROY:
03143           SDL_DestroyMutex(*mtx);
03144           return 0;
03145    }
03146    return 1;
03147 }
03148 
03149 /* Entry point; runs in the main thread */
03150 int main(int argc, char **argv)
03151 {
03152     int flags;
03153     VideoState *is;
03154 
03155     av_log_set_flags(AV_LOG_SKIP_REPEATED);
03156     parse_loglevel(argc, argv, options);
03157 
03158     /* register all codecs, demuxers and protocols */
03159     avcodec_register_all();
03160 #if CONFIG_AVDEVICE
03161     avdevice_register_all();
03162 #endif
03163 #if CONFIG_AVFILTER
03164     avfilter_register_all();
03165 #endif
03166     av_register_all();
03167     avformat_network_init();
03168 
03169     init_opts();
03170 
03171     signal(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
03172     signal(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
03173 
03174     show_banner(argc, argv, options);
03175 
03176     parse_options(NULL, argc, argv, options, opt_input_file);
03177 
03178     if (!input_filename) {
03179         show_usage();
03180         fprintf(stderr, "An input file must be specified\n");
03181         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
03182         exit(1);
03183     }
03184 
03185     if (display_disable) {
03186         video_disable = 1;
03187     }
03188     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03189     if (audio_disable)
03190         flags &= ~SDL_INIT_AUDIO;
03191 #if !defined(__MINGW32__) && !defined(__APPLE__)
03192     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
03193 #endif
03194     if (SDL_Init (flags)) {
03195         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03196         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
03197         exit(1);
03198     }
03199 
03200     if (!display_disable) {
03201 #if HAVE_SDL_VIDEO_SIZE
03202         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03203         fs_screen_width = vi->current_w;
03204         fs_screen_height = vi->current_h;
03205 #endif
03206     }
03207 
03208     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03209     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03210     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03211 
03212     if (av_lockmgr_register(lockmgr)) {
03213         fprintf(stderr, "Could not initialize lock manager!\n");
03214         do_exit(NULL);
03215     }
03216 
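    /* flush_pkt is a sentinel packet: after a seek it is queued to every stream
     * so the decoder threads know to flush their codec buffers before decoding
     * the packets that follow. */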
03217     av_init_packet(&flush_pkt);
03218     flush_pkt.data = "FLUSH";
03219 
03220     is = stream_open(input_filename, file_iformat);
03221     if (!is) {
03222         fprintf(stderr, "Failed to initialize VideoState!\n");
03223         do_exit(NULL);
03224     }
03225 
03226     event_loop(is);
03227 
03228     /* never returns */
03229 
03230     return 0;
03231 }