00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022 #define _XOPEN_SOURCE 600
00023
00024 #include "config.h"
00025 #include <inttypes.h>
00026 #include <math.h>
00027 #include <limits.h>
00028 #include "libavutil/avstring.h"
00029 #include "libavutil/colorspace.h"
00030 #include "libavutil/pixdesc.h"
00031 #include "libavutil/imgutils.h"
00032 #include "libavutil/parseutils.h"
00033 #include "libavutil/samplefmt.h"
00034 #include "libavformat/avformat.h"
00035 #include "libavdevice/avdevice.h"
00036 #include "libswscale/swscale.h"
00037 #include "libavcodec/audioconvert.h"
00038 #include "libavcodec/opt.h"
00039 #include "libavcodec/avfft.h"
00040
00041 #if CONFIG_AVFILTER
00042 # include "libavfilter/avfilter.h"
00043 # include "libavfilter/avfiltergraph.h"
00044 #endif
00045
00046 #include "cmdutils.h"
00047
00048 #include <SDL.h>
00049 #include <SDL_thread.h>
00050
00051 #ifdef __MINGW32__
00052 #undef main
00053 #endif
00054
00055 #include <unistd.h>
00056 #include <assert.h>
00057
00058 const char program_name[] = "FFplay";
00059 const int program_birth_year = 2003;
00060
00061
00062
00063
00064 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
00065 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
00066 #define MIN_FRAMES 5
00067
00068
00069 /* SDL audio buffer size, in samples; kept small so that A/V sync stays precise */
00070 #define SDL_AUDIO_BUFFER_SIZE 1024
00071
00072 /* no A-V sync correction is done if the error is below this threshold, in seconds */
00073 #define AV_SYNC_THRESHOLD 0.01
00074 /* no A-V correction is done at all if the error exceeds this threshold, in seconds */
00075 #define AV_NOSYNC_THRESHOLD 10.0
00076
00077 #define FRAME_SKIP_FACTOR 0.05
00078
00079 /* maximum audio speed change allowed to regain sync, in percent */
00080 #define SAMPLE_CORRECTION_PERCENT_MAX 10
00081
00082 /* number of A-V difference measurements averaged to estimate the audio drift */
00083 #define AUDIO_DIFF_AVG_NB 20
00084
00085 /* must be large enough to cover the hardware audio buffer size */
00086 #define SAMPLE_ARRAY_SIZE (2*65536)
00087
00088 static int sws_flags = SWS_BICUBIC;
00089
00090 typedef struct PacketQueue {
00091 AVPacketList *first_pkt, *last_pkt;
00092 int nb_packets;
00093 int size;
00094 int abort_request;
00095 SDL_mutex *mutex;
00096 SDL_cond *cond;
00097 } PacketQueue;
00098
00099 #define VIDEO_PICTURE_QUEUE_SIZE 2
00100 #define SUBPICTURE_QUEUE_SIZE 4
00101
00102 typedef struct VideoPicture {
00103 double pts;
00104 double target_clock;
00105 int64_t pos;
00106 SDL_Overlay *bmp;
00107 int width, height;
00108 int allocated;
00109 enum PixelFormat pix_fmt;
00110
00111 #if CONFIG_AVFILTER
00112 AVFilterBufferRef *picref;
00113 #endif
00114 } VideoPicture;
00115
00116 typedef struct SubPicture {
00117 double pts;
00118 AVSubtitle sub;
00119 } SubPicture;
00120
00121 enum {
00122 AV_SYNC_AUDIO_MASTER,
00123 AV_SYNC_VIDEO_MASTER,
00124 AV_SYNC_EXTERNAL_CLOCK,
00125 };
00126
00127 typedef struct VideoState {
00128 SDL_Thread *parse_tid;
00129 SDL_Thread *video_tid;
00130 SDL_Thread *refresh_tid;
00131 AVInputFormat *iformat;
00132 int no_background;
00133 int abort_request;
00134 int paused;
00135 int last_paused;
00136 int seek_req;
00137 int seek_flags;
00138 int64_t seek_pos;
00139 int64_t seek_rel;
00140 int read_pause_return;
00141 AVFormatContext *ic;
00142 int dtg_active_format;
00143
00144 int audio_stream;
00145
00146 int av_sync_type;
00147 double external_clock;
00148 int64_t external_clock_time;
00149
00150 double audio_clock;
00151 double audio_diff_cum;
00152 double audio_diff_avg_coef;
00153 double audio_diff_threshold;
00154 int audio_diff_avg_count;
00155 AVStream *audio_st;
00156 PacketQueue audioq;
00157 int audio_hw_buf_size;
00158
00159
00160 DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00161 DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
00162 uint8_t *audio_buf;
00163 unsigned int audio_buf_size;
00164 int audio_buf_index;
00165 AVPacket audio_pkt_temp;
00166 AVPacket audio_pkt;
00167 enum AVSampleFormat audio_src_fmt;
00168 AVAudioConvert *reformat_ctx;
00169
00170 int show_audio;
00171 int16_t sample_array[SAMPLE_ARRAY_SIZE];
00172 int sample_array_index;
00173 int last_i_start;
00174 RDFTContext *rdft;
00175 int rdft_bits;
00176 FFTSample *rdft_data;
00177 int xpos;
00178
00179 SDL_Thread *subtitle_tid;
00180 int subtitle_stream;
00181 int subtitle_stream_changed;
00182 AVStream *subtitle_st;
00183 PacketQueue subtitleq;
00184 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
00185 int subpq_size, subpq_rindex, subpq_windex;
00186 SDL_mutex *subpq_mutex;
00187 SDL_cond *subpq_cond;
00188
00189 double frame_timer;
00190 double frame_last_pts;
00191 double frame_last_delay;
00192 double video_clock;
00193 int video_stream;
00194 AVStream *video_st;
00195 PacketQueue videoq;
00196 double video_current_pts;
00197 double video_current_pts_drift;
00198 int64_t video_current_pos;
00199 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
00200 int pictq_size, pictq_rindex, pictq_windex;
00201 SDL_mutex *pictq_mutex;
00202 SDL_cond *pictq_cond;
00203 #if !CONFIG_AVFILTER
00204 struct SwsContext *img_convert_ctx;
00205 #endif
00206
00207
00208 char filename[1024];
00209 int width, height, xleft, ytop;
00210
00211 PtsCorrectionContext pts_ctx;
00212
00213 #if CONFIG_AVFILTER
00214 AVFilterContext *out_video_filter;
00215 #endif
00216
00217 float skip_frames;
00218 float skip_frames_index;
00219 int refresh;
00220 } VideoState;
00221
00222 static void show_help(void);
00223 static int audio_write_get_buf_size(VideoState *is);
00224
00225
00226 static AVInputFormat *file_iformat;
00227 static const char *input_filename;
00228 static const char *window_title;
00229 static int fs_screen_width;
00230 static int fs_screen_height;
00231 static int screen_width = 0;
00232 static int screen_height = 0;
00233 static int frame_width = 0;
00234 static int frame_height = 0;
00235 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
00236 static int audio_disable;
00237 static int video_disable;
00238 static int wanted_stream[AVMEDIA_TYPE_NB]={
00239 [AVMEDIA_TYPE_AUDIO]=-1,
00240 [AVMEDIA_TYPE_VIDEO]=-1,
00241 [AVMEDIA_TYPE_SUBTITLE]=-1,
00242 };
00243 static int seek_by_bytes=-1;
00244 static int display_disable;
00245 static int show_status = 1;
00246 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
00247 static int64_t start_time = AV_NOPTS_VALUE;
00248 static int64_t duration = AV_NOPTS_VALUE;
00249 static int debug = 0;
00250 static int debug_mv = 0;
00251 static int step = 0;
00252 static int thread_count = 1;
00253 static int workaround_bugs = 1;
00254 static int fast = 0;
00255 static int genpts = 0;
00256 static int lowres = 0;
00257 static int idct = FF_IDCT_AUTO;
00258 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
00259 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
00260 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
00261 static int error_recognition = FF_ER_CAREFUL;
00262 static int error_concealment = 3;
00263 static int decoder_reorder_pts= -1;
00264 static int autoexit;
00265 static int exit_on_keydown;
00266 static int exit_on_mousedown;
00267 static int loop=1;
00268 static int framedrop=1;
00269
00270 static int rdftspeed=20;
00271 #if CONFIG_AVFILTER
00272 static char *vfilters = NULL;
00273 #endif
00274
00275
00276 static int is_full_screen;
00277 static VideoState *cur_stream;
00278 static int64_t audio_callback_time;
00279
00280 static AVPacket flush_pkt;
00281
00282 #define FF_ALLOC_EVENT (SDL_USEREVENT)
00283 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
00284 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
00285
00286 static SDL_Surface *screen;
00287
00288 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
00289
00290
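/* packet queue: a FIFO of AVPackets protected by an SDL mutex/condition, filled by the demuxer thread and drained by the decoder threads */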
00291 static void packet_queue_init(PacketQueue *q)
00292 {
00293 memset(q, 0, sizeof(PacketQueue));
00294 q->mutex = SDL_CreateMutex();
00295 q->cond = SDL_CreateCond();
00296 packet_queue_put(q, &flush_pkt);
00297 }
00298
00299 static void packet_queue_flush(PacketQueue *q)
00300 {
00301 AVPacketList *pkt, *pkt1;
00302
00303 SDL_LockMutex(q->mutex);
00304 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
00305 pkt1 = pkt->next;
00306 av_free_packet(&pkt->pkt);
00307 av_freep(&pkt);
00308 }
00309 q->last_pkt = NULL;
00310 q->first_pkt = NULL;
00311 q->nb_packets = 0;
00312 q->size = 0;
00313 SDL_UnlockMutex(q->mutex);
00314 }
00315
00316 static void packet_queue_end(PacketQueue *q)
00317 {
00318 packet_queue_flush(q);
00319 SDL_DestroyMutex(q->mutex);
00320 SDL_DestroyCond(q->cond);
00321 }
00322
00323 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
00324 {
00325 AVPacketList *pkt1;
00326
00327
00328 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
00329 return -1;
00330
00331 pkt1 = av_malloc(sizeof(AVPacketList));
00332 if (!pkt1)
00333 return -1;
00334 pkt1->pkt = *pkt;
00335 pkt1->next = NULL;
00336
00337
00338 SDL_LockMutex(q->mutex);
00339
00340 if (!q->last_pkt)
00341
00342 q->first_pkt = pkt1;
00343 else
00344 q->last_pkt->next = pkt1;
00345 q->last_pkt = pkt1;
00346 q->nb_packets++;
00347 q->size += pkt1->pkt.size + sizeof(*pkt1);
00348
00349 SDL_CondSignal(q->cond);
00350
00351 SDL_UnlockMutex(q->mutex);
00352 return 0;
00353 }
00354
00355 static void packet_queue_abort(PacketQueue *q)
00356 {
00357 SDL_LockMutex(q->mutex);
00358
00359 q->abort_request = 1;
00360
00361 SDL_CondSignal(q->cond);
00362
00363 SDL_UnlockMutex(q->mutex);
00364 }
00365
00366
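/* return < 0 if aborted, 0 if no packet and non-blocking, > 0 if a packet was returned */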
00367 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
00368 {
00369 AVPacketList *pkt1;
00370 int ret;
00371
00372 SDL_LockMutex(q->mutex);
00373
00374 for(;;) {
00375 if (q->abort_request) {
00376 ret = -1;
00377 break;
00378 }
00379
00380 pkt1 = q->first_pkt;
00381 if (pkt1) {
00382 q->first_pkt = pkt1->next;
00383 if (!q->first_pkt)
00384 q->last_pkt = NULL;
00385 q->nb_packets--;
00386 q->size -= pkt1->pkt.size + sizeof(*pkt1);
00387 *pkt = pkt1->pkt;
00388 av_free(pkt1);
00389 ret = 1;
00390 break;
00391 } else if (!block) {
00392 ret = 0;
00393 break;
00394 } else {
00395 SDL_CondWait(q->cond, q->mutex);
00396 }
00397 }
00398 SDL_UnlockMutex(q->mutex);
00399 return ret;
00400 }
00401
00402 static inline void fill_rectangle(SDL_Surface *screen,
00403 int x, int y, int w, int h, int color)
00404 {
00405 SDL_Rect rect;
00406 rect.x = x;
00407 rect.y = y;
00408 rect.w = w;
00409 rect.h = h;
00410 SDL_FillRect(screen, &rect, color);
00411 }
00412
00413 #if 0
00414
00415 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
00416 {
00417 int w1, w2, h1, h2;
00418
00419
00420 w1 = x;
00421 if (w1 < 0)
00422 w1 = 0;
00423 w2 = s->width - (x + w);
00424 if (w2 < 0)
00425 w2 = 0;
00426 h1 = y;
00427 if (h1 < 0)
00428 h1 = 0;
00429 h2 = s->height - (y + h);
00430 if (h2 < 0)
00431 h2 = 0;
00432 fill_rectangle(screen,
00433 s->xleft, s->ytop,
00434 w1, s->height,
00435 color);
00436 fill_rectangle(screen,
00437 s->xleft + s->width - w2, s->ytop,
00438 w2, s->height,
00439 color);
00440 fill_rectangle(screen,
00441 s->xleft + w1, s->ytop,
00442 s->width - w1 - w2, h1,
00443 color);
00444 fill_rectangle(screen,
00445 s->xleft + w1, s->ytop + s->height - h2,
00446 s->width - w1 - w2, h2,
00447 color);
00448 }
00449 #endif
00450
00451 #define ALPHA_BLEND(a, oldp, newp, s)\
00452 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
00453
00454 #define RGBA_IN(r, g, b, a, s)\
00455 {\
00456 unsigned int v = ((const uint32_t *)(s))[0];\
00457 a = (v >> 24) & 0xff;\
00458 r = (v >> 16) & 0xff;\
00459 g = (v >> 8) & 0xff;\
00460 b = v & 0xff;\
00461 }
00462
00463 #define YUVA_IN(y, u, v, a, s, pal)\
00464 {\
00465 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
00466 a = (val >> 24) & 0xff;\
00467 y = (val >> 16) & 0xff;\
00468 u = (val >> 8) & 0xff;\
00469 v = val & 0xff;\
00470 }
00471
00472 #define YUVA_OUT(d, y, u, v, a)\
00473 {\
00474 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
00475 }
00476
00477
00478 #define BPP 1
00479
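/* blend a palettized subtitle rectangle (palette already converted to YUVA) onto the YUV picture, averaging the chroma over each 2x2 block of luma samples */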
00480 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
00481 {
00482 int wrap, wrap3, width2, skip2;
00483 int y, u, v, a, u1, v1, a1, w, h;
00484 uint8_t *lum, *cb, *cr;
00485 const uint8_t *p;
00486 const uint32_t *pal;
00487 int dstx, dsty, dstw, dsth;
00488
00489 dstw = av_clip(rect->w, 0, imgw);
00490 dsth = av_clip(rect->h, 0, imgh);
00491 dstx = av_clip(rect->x, 0, imgw - dstw);
00492 dsty = av_clip(rect->y, 0, imgh - dsth);
00493 lum = dst->data[0] + dsty * dst->linesize[0];
00494 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
00495 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
00496
00497 width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
00498 skip2 = dstx >> 1;
00499 wrap = dst->linesize[0];
00500 wrap3 = rect->pict.linesize[0];
00501 p = rect->pict.data[0];
00502 pal = (const uint32_t *)rect->pict.data[1];
00503
00504 if (dsty & 1) {
00505 lum += dstx;
00506 cb += skip2;
00507 cr += skip2;
00508
00509 if (dstx & 1) {
00510 YUVA_IN(y, u, v, a, p, pal);
00511 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00512 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00513 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00514 cb++;
00515 cr++;
00516 lum++;
00517 p += BPP;
00518 }
00519 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00520 YUVA_IN(y, u, v, a, p, pal);
00521 u1 = u;
00522 v1 = v;
00523 a1 = a;
00524 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00525
00526 YUVA_IN(y, u, v, a, p + BPP, pal);
00527 u1 += u;
00528 v1 += v;
00529 a1 += a;
00530 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00531 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00532 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00533 cb++;
00534 cr++;
00535 p += 2 * BPP;
00536 lum += 2;
00537 }
00538 if (w) {
00539 YUVA_IN(y, u, v, a, p, pal);
00540 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00541 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00542 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00543 p++;
00544 lum++;
00545 }
00546 p += wrap3 - dstw * BPP;
00547 lum += wrap - dstw - dstx;
00548 cb += dst->linesize[1] - width2 - skip2;
00549 cr += dst->linesize[2] - width2 - skip2;
00550 }
00551 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
00552 lum += dstx;
00553 cb += skip2;
00554 cr += skip2;
00555
00556 if (dstx & 1) {
00557 YUVA_IN(y, u, v, a, p, pal);
00558 u1 = u;
00559 v1 = v;
00560 a1 = a;
00561 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00562 p += wrap3;
00563 lum += wrap;
00564 YUVA_IN(y, u, v, a, p, pal);
00565 u1 += u;
00566 v1 += v;
00567 a1 += a;
00568 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00569 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00570 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00571 cb++;
00572 cr++;
00573 p += -wrap3 + BPP;
00574 lum += -wrap + 1;
00575 }
00576 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00577 YUVA_IN(y, u, v, a, p, pal);
00578 u1 = u;
00579 v1 = v;
00580 a1 = a;
00581 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00582
00583 YUVA_IN(y, u, v, a, p + BPP, pal);
00584 u1 += u;
00585 v1 += v;
00586 a1 += a;
00587 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00588 p += wrap3;
00589 lum += wrap;
00590
00591 YUVA_IN(y, u, v, a, p, pal);
00592 u1 += u;
00593 v1 += v;
00594 a1 += a;
00595 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00596
00597 YUVA_IN(y, u, v, a, p + BPP, pal);
00598 u1 += u;
00599 v1 += v;
00600 a1 += a;
00601 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00602
00603 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
00604 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
00605
00606 cb++;
00607 cr++;
00608 p += -wrap3 + 2 * BPP;
00609 lum += -wrap + 2;
00610 }
00611 if (w) {
00612 YUVA_IN(y, u, v, a, p, pal);
00613 u1 = u;
00614 v1 = v;
00615 a1 = a;
00616 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00617 p += wrap3;
00618 lum += wrap;
00619 YUVA_IN(y, u, v, a, p, pal);
00620 u1 += u;
00621 v1 += v;
00622 a1 += a;
00623 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00624 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00625 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00626 cb++;
00627 cr++;
00628 p += -wrap3 + BPP;
00629 lum += -wrap + 1;
00630 }
00631 p += wrap3 + (wrap3 - dstw * BPP);
00632 lum += wrap + (wrap - dstw - dstx);
00633 cb += dst->linesize[1] - width2 - skip2;
00634 cr += dst->linesize[2] - width2 - skip2;
00635 }
00636
00637 if (h) {
00638 lum += dstx;
00639 cb += skip2;
00640 cr += skip2;
00641
00642 if (dstx & 1) {
00643 YUVA_IN(y, u, v, a, p, pal);
00644 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00645 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00646 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00647 cb++;
00648 cr++;
00649 lum++;
00650 p += BPP;
00651 }
00652 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
00653 YUVA_IN(y, u, v, a, p, pal);
00654 u1 = u;
00655 v1 = v;
00656 a1 = a;
00657 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00658
00659 YUVA_IN(y, u, v, a, p + BPP, pal);
00660 u1 += u;
00661 v1 += v;
00662 a1 += a;
00663 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
00664 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
00665 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
00666 cb++;
00667 cr++;
00668 p += 2 * BPP;
00669 lum += 2;
00670 }
00671 if (w) {
00672 YUVA_IN(y, u, v, a, p, pal);
00673 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
00674 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
00675 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
00676 }
00677 }
00678 }
00679
00680 static void free_subpicture(SubPicture *sp)
00681 {
00682 avsubtitle_free(&sp->sub);
00683 }
00684
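/* display the current video picture, blending any pending subtitle into it first */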
00685 static void video_image_display(VideoState *is)
00686 {
00687 VideoPicture *vp;
00688 SubPicture *sp;
00689 AVPicture pict;
00690 float aspect_ratio;
00691 int width, height, x, y;
00692 SDL_Rect rect;
00693 int i;
00694
00695 vp = &is->pictq[is->pictq_rindex];
00696 if (vp->bmp) {
00697 #if CONFIG_AVFILTER
00698 if (vp->picref->video->pixel_aspect.num == 0)
00699 aspect_ratio = 0;
00700 else
00701 aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
00702 #else
00703
00704
00705 if (is->video_st->sample_aspect_ratio.num)
00706 aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
00707 else if (is->video_st->codec->sample_aspect_ratio.num)
00708 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
00709 else
00710 aspect_ratio = 0;
00711 #endif
00712 if (aspect_ratio <= 0.0)
00713 aspect_ratio = 1.0;
00714 aspect_ratio *= (float)vp->width / (float)vp->height;
00715
00716 if (is->subtitle_st)
00717 {
00718 if (is->subpq_size > 0)
00719 {
00720 sp = &is->subpq[is->subpq_rindex];
00721
00722 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
00723 {
00724 SDL_LockYUVOverlay (vp->bmp);
00725
00726 pict.data[0] = vp->bmp->pixels[0];
00727 pict.data[1] = vp->bmp->pixels[2];
00728 pict.data[2] = vp->bmp->pixels[1];
00729
00730 pict.linesize[0] = vp->bmp->pitches[0];
00731 pict.linesize[1] = vp->bmp->pitches[2];
00732 pict.linesize[2] = vp->bmp->pitches[1];
00733
00734 for (i = 0; i < sp->sub.num_rects; i++)
00735 blend_subrect(&pict, sp->sub.rects[i],
00736 vp->bmp->w, vp->bmp->h);
00737
00738 SDL_UnlockYUVOverlay (vp->bmp);
00739 }
00740 }
00741 }
00742
00743
00744
00745 height = is->height;
00746 width = ((int)rint(height * aspect_ratio)) & ~1;
00747 if (width > is->width) {
00748 width = is->width;
00749 height = ((int)rint(width / aspect_ratio)) & ~1;
00750 }
00751 x = (is->width - width) / 2;
00752 y = (is->height - height) / 2;
00753 if (!is->no_background) {
00754
00755
00756 } else {
00757 is->no_background = 0;
00758 }
00759 rect.x = is->xleft + x;
00760 rect.y = is->ytop + y;
00761 rect.w = width;
00762 rect.h = height;
00763 SDL_DisplayYUVOverlay(vp->bmp, &rect);
00764 } else {
00765 #if 0
00766 fill_rectangle(screen,
00767 is->xleft, is->ytop, is->width, is->height,
00768 QERGB(0x00, 0x00, 0x00));
00769 #endif
00770 }
00771 }
00772
00773 static inline int compute_mod(int a, int b)
00774 {
00775 a = a % b;
00776 if (a >= 0)
00777 return a;
00778 else
00779 return a + b;
00780 }
00781
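/* draw the audio waveform (show_audio == 1) or its RDFT spectrum into the SDL surface */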
00782 static void video_audio_display(VideoState *s)
00783 {
00784 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
00785 int ch, channels, h, h2, bgcolor, fgcolor;
00786 int64_t time_diff; /* microseconds since the last audio callback; int64_t avoids overflow */
00787 int rdft_bits, nb_freq;
00788
00789 for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
00790 ;
00791 nb_freq= 1<<(rdft_bits-1);
00792
00793
00794 channels = s->audio_st->codec->channels;
00795 nb_display_channels = channels;
00796 if (!s->paused) {
00797 int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
00798 n = 2 * channels;
00799 delay = audio_write_get_buf_size(s);
00800 delay /= n;
00801
00802
00803
00804 if (audio_callback_time) {
00805 time_diff = av_gettime() - audio_callback_time;
00806 delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
00807 }
00808
00809 delay += 2*data_used;
00810 if (delay < data_used)
00811 delay = data_used;
00812
00813 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
00814 if(s->show_audio==1){
00815 h= INT_MIN;
00816 for(i=0; i<1000; i+=channels){
00817 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
00818 int a= s->sample_array[idx];
00819 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
00820 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
00821 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
00822 int score= a-d;
00823 if(h<score && (b^c)<0){
00824 h= score;
00825 i_start= idx;
00826 }
00827 }
00828 }
00829
00830 s->last_i_start = i_start;
00831 } else {
00832 i_start = s->last_i_start;
00833 }
00834
00835 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
00836 if(s->show_audio==1){
00837 fill_rectangle(screen,
00838 s->xleft, s->ytop, s->width, s->height,
00839 bgcolor);
00840
00841 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
00842
00843
00844 h = s->height / nb_display_channels;
00845
00846 h2 = (h * 9) / 20;
00847 for(ch = 0;ch < nb_display_channels; ch++) {
00848 i = i_start + ch;
00849 y1 = s->ytop + ch * h + (h / 2);
00850 for(x = 0; x < s->width; x++) {
00851 y = (s->sample_array[i] * h2) >> 15;
00852 if (y < 0) {
00853 y = -y;
00854 ys = y1 - y;
00855 } else {
00856 ys = y1;
00857 }
00858 fill_rectangle(screen,
00859 s->xleft + x, ys, 1, y,
00860 fgcolor);
00861 i += channels;
00862 if (i >= SAMPLE_ARRAY_SIZE)
00863 i -= SAMPLE_ARRAY_SIZE;
00864 }
00865 }
00866
00867 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
00868
00869 for(ch = 1;ch < nb_display_channels; ch++) {
00870 y = s->ytop + ch * h;
00871 fill_rectangle(screen,
00872 s->xleft, y, s->width, 1,
00873 fgcolor);
00874 }
00875 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
00876 }else{
00877 nb_display_channels= FFMIN(nb_display_channels, 2);
00878 if(rdft_bits != s->rdft_bits){
00879 av_rdft_end(s->rdft);
00880 av_free(s->rdft_data);
00881 s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
00882 s->rdft_bits= rdft_bits;
00883 s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
00884 }
00885 {
00886 FFTSample *data[2];
00887 for(ch = 0;ch < nb_display_channels; ch++) {
00888 data[ch] = s->rdft_data + 2*nb_freq*ch;
00889 i = i_start + ch;
00890 for(x = 0; x < 2*nb_freq; x++) {
00891 double w= (x-nb_freq)*(1.0/nb_freq);
00892 data[ch][x]= s->sample_array[i]*(1.0-w*w);
00893 i += channels;
00894 if (i >= SAMPLE_ARRAY_SIZE)
00895 i -= SAMPLE_ARRAY_SIZE;
00896 }
00897 av_rdft_calc(s->rdft, data[ch]);
00898 }
00899
00900 for(y=0; y<s->height; y++){
00901 double w= 1/sqrt(nb_freq);
00902 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
00903 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
00904 + data[1][2*y+1]*data[1][2*y+1])) : a;
00905 a= FFMIN(a,255);
00906 b= FFMIN(b,255);
00907 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
00908
00909 fill_rectangle(screen,
00910 s->xpos, s->height-y, 1, 1,
00911 fgcolor);
00912 }
00913 }
00914 SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
00915 s->xpos++;
00916 if(s->xpos >= s->width)
00917 s->xpos= s->xleft;
00918 }
00919 }
00920
00921 static int video_open(VideoState *is){
00922 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
00923 int w,h;
00924
00925 if(is_full_screen) flags |= SDL_FULLSCREEN;
00926 else flags |= SDL_RESIZABLE;
00927
00928 if (is_full_screen && fs_screen_width) {
00929 w = fs_screen_width;
00930 h = fs_screen_height;
00931 } else if(!is_full_screen && screen_width){
00932 w = screen_width;
00933 h = screen_height;
00934 #if CONFIG_AVFILTER
00935 }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
00936 w = is->out_video_filter->inputs[0]->w;
00937 h = is->out_video_filter->inputs[0]->h;
00938 #else
00939 }else if (is->video_st && is->video_st->codec->width){
00940 w = is->video_st->codec->width;
00941 h = is->video_st->codec->height;
00942 #endif
00943 } else {
00944 w = 640;
00945 h = 480;
00946 }
00947 if(screen && is->width == screen->w && screen->w == w
00948 && is->height== screen->h && screen->h == h)
00949 return 0;
00950
00951 #ifndef __APPLE__
00952 screen = SDL_SetVideoMode(w, h, 0, flags);
00953 #else
00954
00955 screen = SDL_SetVideoMode(w, h, 24, flags);
00956 #endif
00957 if (!screen) {
00958 fprintf(stderr, "SDL: could not set video mode - exiting\n");
00959 return -1;
00960 }
00961 if (!window_title)
00962 window_title = input_filename;
00963 SDL_WM_SetCaption(window_title, window_title);
00964
00965 is->width = screen->w;
00966 is->height = screen->h;
00967
00968 return 0;
00969 }
00970
00971
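/* display the current picture, if any */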
00972 static void video_display(VideoState *is)
00973 {
00974 if(!screen)
00975 video_open(cur_stream);
00976 if (is->audio_st && is->show_audio)
00977 video_audio_display(is);
00978 else if (is->video_st)
00979 video_image_display(is);
00980 }
00981
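/* periodically push an FF_REFRESH_EVENT so that the main event loop refreshes the display */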
00982 static int refresh_thread(void *opaque)
00983 {
00984 VideoState *is= opaque;
00985 while(!is->abort_request){
00986 SDL_Event event;
00987 event.type = FF_REFRESH_EVENT;
00988 event.user.data1 = opaque;
00989 if(!is->refresh){
00990 is->refresh=1;
00991 SDL_PushEvent(&event);
00992 }
00993 usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000);
00994 }
00995 return 0;
00996 }
00997
00998
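/* get the current audio clock value: the stream time, in seconds, of the sample currently being played */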
00999 static double get_audio_clock(VideoState *is)
01000 {
01001 double pts;
01002 int hw_buf_size, bytes_per_sec;
01003 pts = is->audio_clock;
01004 hw_buf_size = audio_write_get_buf_size(is);
01005 bytes_per_sec = 0;
01006 if (is->audio_st) {
01007 bytes_per_sec = is->audio_st->codec->sample_rate *
01008 2 * is->audio_st->codec->channels;
01009 }
01010 if (bytes_per_sec)
01011 pts -= (double)hw_buf_size / bytes_per_sec;
01012 return pts;
01013 }
01014
01015
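/* get the current video clock value */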
01016 static double get_video_clock(VideoState *is)
01017 {
01018 if (is->paused) {
01019 return is->video_current_pts;
01020 } else {
01021 return is->video_current_pts_drift + av_gettime() / 1000000.0;
01022 }
01023 }
01024
01025
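/* get the current external clock value */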
01026 static double get_external_clock(VideoState *is)
01027 {
01028 int64_t ti;
01029 ti = av_gettime();
01030 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
01031 }
01032
01033
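/* get the current master clock value, according to the selected sync type */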
01034 static double get_master_clock(VideoState *is)
01035 {
01036 double val;
01037
01038 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
01039 if (is->video_st)
01040 val = get_video_clock(is);
01041 else
01042 val = get_audio_clock(is);
01043 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
01044 if (is->audio_st)
01045 val = get_audio_clock(is);
01046 else
01047 val = get_video_clock(is);
01048 } else {
01049 val = get_external_clock(is);
01050 }
01051 return val;
01052 }
01053
01054
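/* request a seek; the actual seek is performed by the demuxer thread */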
01055 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
01056 {
01057 if (!is->seek_req) {
01058 is->seek_pos = pos;
01059 is->seek_rel = rel;
01060 is->seek_flags &= ~AVSEEK_FLAG_BYTE;
01061 if (seek_by_bytes)
01062 is->seek_flags |= AVSEEK_FLAG_BYTE;
01063 is->seek_req = 1;
01064 }
01065 }
01066
01067
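/* pause or resume the stream, resynchronizing the frame timer when resuming */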
01068 static void stream_pause(VideoState *is)
01069 {
01070 if (is->paused) {
01071 is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
01072 if(is->read_pause_return != AVERROR(ENOSYS)){
01073 is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
01074 }
01075 is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
01076 }
01077 is->paused = !is->paused;
01078 }
01079
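/* compute the wall-clock time at which the given frame should be shown.
 * The nominal delay is pts - frame_last_pts; when audio or the external clock
 * is the master, the delay is dropped to 0 if the video lags behind the master
 * and doubled if it runs ahead, as long as the error stays below
 * AV_NOSYNC_THRESHOLD. */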
01080 static double compute_target_time(double frame_current_pts, VideoState *is)
01081 {
01082 double delay, sync_threshold, diff;
01083
01084
01085 delay = frame_current_pts - is->frame_last_pts;
01086 if (delay <= 0 || delay >= 10.0) {
01087
01088 delay = is->frame_last_delay;
01089 } else {
01090 is->frame_last_delay = delay;
01091 }
01092 is->frame_last_pts = frame_current_pts;
01093
01094
01095 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
01096 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01097
01098
01099 diff = get_video_clock(is) - get_master_clock(is);
01100
01101
01102
01103
01104 sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
01105 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
01106 if (diff <= -sync_threshold)
01107 delay = 0;
01108 else if (diff >= sync_threshold)
01109 delay = 2 * delay;
01110 }
01111 }
01112 is->frame_timer += delay;
01113 #if defined(DEBUG_SYNC)
01114 printf("video: delay=%0.3f pts=%0.3f frame_timer=%0.3f\n",
01115 delay, frame_current_pts, is->frame_timer);
01116 #endif
01117
01118 return is->frame_timer;
01119 }
01120
01121
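/* called to display each frame; also drops late frames and retires expired subtitles */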
01122 static void video_refresh_timer(void *opaque)
01123 {
01124 VideoState *is = opaque;
01125 VideoPicture *vp;
01126
01127 SubPicture *sp, *sp2;
01128
01129 if (is->video_st) {
01130 retry:
01131 if (is->pictq_size == 0) {
01132
01133 } else {
01134 double time= av_gettime()/1000000.0;
01135 double next_target;
01136
01137 vp = &is->pictq[is->pictq_rindex];
01138
01139 if(time < vp->target_clock)
01140 return;
01141
01142 is->video_current_pts = vp->pts;
01143 is->video_current_pts_drift = is->video_current_pts - time;
01144 is->video_current_pos = vp->pos;
01145 if(is->pictq_size > 1){
01146 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
01147 assert(nextvp->target_clock >= vp->target_clock);
01148 next_target= nextvp->target_clock;
01149 }else{
01150 next_target= vp->target_clock + is->video_clock - vp->pts;
01151 }
01152 if(framedrop && time > next_target){
01153 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
01154 if(is->pictq_size > 1 || time > next_target + 0.5){
01155
01156 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01157 is->pictq_rindex = 0;
01158
01159 SDL_LockMutex(is->pictq_mutex);
01160 is->pictq_size--;
01161 SDL_CondSignal(is->pictq_cond);
01162 SDL_UnlockMutex(is->pictq_mutex);
01163 goto retry;
01164 }
01165 }
01166
01167 if(is->subtitle_st) {
01168 if (is->subtitle_stream_changed) {
01169 SDL_LockMutex(is->subpq_mutex);
01170
01171 while (is->subpq_size) {
01172 free_subpicture(&is->subpq[is->subpq_rindex]);
01173
01174
01175 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01176 is->subpq_rindex = 0;
01177
01178 is->subpq_size--;
01179 }
01180 is->subtitle_stream_changed = 0;
01181
01182 SDL_CondSignal(is->subpq_cond);
01183 SDL_UnlockMutex(is->subpq_mutex);
01184 } else {
01185 if (is->subpq_size > 0) {
01186 sp = &is->subpq[is->subpq_rindex];
01187
01188 if (is->subpq_size > 1)
01189 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
01190 else
01191 sp2 = NULL;
01192
01193 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
01194 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
01195 {
01196 free_subpicture(sp);
01197
01198
01199 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
01200 is->subpq_rindex = 0;
01201
01202 SDL_LockMutex(is->subpq_mutex);
01203 is->subpq_size--;
01204 SDL_CondSignal(is->subpq_cond);
01205 SDL_UnlockMutex(is->subpq_mutex);
01206 }
01207 }
01208 }
01209 }
01210
01211
01212 if (!display_disable)
01213 video_display(is);
01214
01215
01216 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
01217 is->pictq_rindex = 0;
01218
01219 SDL_LockMutex(is->pictq_mutex);
01220 is->pictq_size--;
01221 SDL_CondSignal(is->pictq_cond);
01222 SDL_UnlockMutex(is->pictq_mutex);
01223 }
01224 } else if (is->audio_st) {
01225
01226
01227
01228
01229
01230
01231 if (!display_disable)
01232 video_display(is);
01233 }
01234 if (show_status) {
01235 static int64_t last_time;
01236 int64_t cur_time;
01237 int aqsize, vqsize, sqsize;
01238 double av_diff;
01239
01240 cur_time = av_gettime();
01241 if (!last_time || (cur_time - last_time) >= 30000) {
01242 aqsize = 0;
01243 vqsize = 0;
01244 sqsize = 0;
01245 if (is->audio_st)
01246 aqsize = is->audioq.size;
01247 if (is->video_st)
01248 vqsize = is->videoq.size;
01249 if (is->subtitle_st)
01250 sqsize = is->subtitleq.size;
01251 av_diff = 0;
01252 if (is->audio_st && is->video_st)
01253 av_diff = get_audio_clock(is) - get_video_clock(is);
01254 printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
01255 get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
01256 fflush(stdout);
01257 last_time = cur_time;
01258 }
01259 }
01260 }
01261
01262 static void stream_close(VideoState *is)
01263 {
01264 VideoPicture *vp;
01265 int i;
01266
01267 is->abort_request = 1;
01268 SDL_WaitThread(is->parse_tid, NULL);
01269 SDL_WaitThread(is->refresh_tid, NULL);
01270
01271
01272 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
01273 vp = &is->pictq[i];
01274 #if CONFIG_AVFILTER
01275 if (vp->picref) {
01276 avfilter_unref_buffer(vp->picref);
01277 vp->picref = NULL;
01278 }
01279 #endif
01280 if (vp->bmp) {
01281 SDL_FreeYUVOverlay(vp->bmp);
01282 vp->bmp = NULL;
01283 }
01284 }
01285 SDL_DestroyMutex(is->pictq_mutex);
01286 SDL_DestroyCond(is->pictq_cond);
01287 SDL_DestroyMutex(is->subpq_mutex);
01288 SDL_DestroyCond(is->subpq_cond);
01289 #if !CONFIG_AVFILTER
01290 if (is->img_convert_ctx)
01291 sws_freeContext(is->img_convert_ctx);
01292 #endif
01293 av_free(is);
01294 }
01295
01296 static void do_exit(void)
01297 {
01298 if (cur_stream) {
01299 stream_close(cur_stream);
01300 cur_stream = NULL;
01301 }
01302 uninit_opts();
01303 #if CONFIG_AVFILTER
01304 avfilter_uninit();
01305 #endif
01306 if (show_status)
01307 printf("\n");
01308 SDL_Quit();
01309 av_log(NULL, AV_LOG_QUIET, "");
01310 exit(0);
01311 }
01312
01313
01314
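/* allocate the SDL YUV overlay for the next picture; must run in the main thread (triggered by FF_ALLOC_EVENT) to avoid SDL locking problems */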
01315 static void alloc_picture(void *opaque)
01316 {
01317 VideoState *is = opaque;
01318 VideoPicture *vp;
01319
01320 vp = &is->pictq[is->pictq_windex];
01321
01322 if (vp->bmp)
01323 SDL_FreeYUVOverlay(vp->bmp);
01324
01325 #if CONFIG_AVFILTER
01326 if (vp->picref)
01327 avfilter_unref_buffer(vp->picref);
01328 vp->picref = NULL;
01329
01330 vp->width = is->out_video_filter->inputs[0]->w;
01331 vp->height = is->out_video_filter->inputs[0]->h;
01332 vp->pix_fmt = is->out_video_filter->inputs[0]->format;
01333 #else
01334 vp->width = is->video_st->codec->width;
01335 vp->height = is->video_st->codec->height;
01336 vp->pix_fmt = is->video_st->codec->pix_fmt;
01337 #endif
01338
01339 vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
01340 SDL_YV12_OVERLAY,
01341 screen);
01342 if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
01343
01344
01345 fprintf(stderr, "Error: the video system does not support an image\n"
01346 "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
01347 "to reduce the image size.\n", vp->width, vp->height );
01348 do_exit();
01349 }
01350
01351 SDL_LockMutex(is->pictq_mutex);
01352 vp->allocated = 1;
01353 SDL_CondSignal(is->pictq_cond);
01354 SDL_UnlockMutex(is->pictq_mutex);
01355 }
01356
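/**
 * queue a decoded frame for display
 * @param pts presentation time of the frame in seconds, guessed when the stream does not provide it
 * @param pos byte position of the frame's packet in the input file
 */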
01361 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
01362 {
01363 VideoPicture *vp;
01364 int dst_pix_fmt;
01365 #if CONFIG_AVFILTER
01366 AVPicture pict_src;
01367 #endif
01368
01369 SDL_LockMutex(is->pictq_mutex);
01370
01371 if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
01372 is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
01373
01374 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
01375 !is->videoq.abort_request) {
01376 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01377 }
01378 SDL_UnlockMutex(is->pictq_mutex);
01379
01380 if (is->videoq.abort_request)
01381 return -1;
01382
01383 vp = &is->pictq[is->pictq_windex];
01384
01385
01386 if (!vp->bmp ||
01387 #if CONFIG_AVFILTER
01388 vp->width != is->out_video_filter->inputs[0]->w ||
01389 vp->height != is->out_video_filter->inputs[0]->h) {
01390 #else
01391 vp->width != is->video_st->codec->width ||
01392 vp->height != is->video_st->codec->height) {
01393 #endif
01394 SDL_Event event;
01395
01396 vp->allocated = 0;
01397
01398
01399
01400 event.type = FF_ALLOC_EVENT;
01401 event.user.data1 = is;
01402 SDL_PushEvent(&event);
01403
01404
01405 SDL_LockMutex(is->pictq_mutex);
01406 while (!vp->allocated && !is->videoq.abort_request) {
01407 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01408 }
01409 SDL_UnlockMutex(is->pictq_mutex);
01410
01411 if (is->videoq.abort_request)
01412 return -1;
01413 }
01414
01415
01416 if (vp->bmp) {
01417 AVPicture pict;
01418 #if CONFIG_AVFILTER
01419 if(vp->picref)
01420 avfilter_unref_buffer(vp->picref);
01421 vp->picref = src_frame->opaque;
01422 #endif
01423
01424
01425 SDL_LockYUVOverlay (vp->bmp);
01426
01427 dst_pix_fmt = PIX_FMT_YUV420P;
01428 memset(&pict,0,sizeof(AVPicture));
01429 pict.data[0] = vp->bmp->pixels[0];
01430 pict.data[1] = vp->bmp->pixels[2];
01431 pict.data[2] = vp->bmp->pixels[1];
01432
01433 pict.linesize[0] = vp->bmp->pitches[0];
01434 pict.linesize[1] = vp->bmp->pitches[2];
01435 pict.linesize[2] = vp->bmp->pitches[1];
01436
01437 #if CONFIG_AVFILTER
01438 pict_src.data[0] = src_frame->data[0];
01439 pict_src.data[1] = src_frame->data[1];
01440 pict_src.data[2] = src_frame->data[2];
01441
01442 pict_src.linesize[0] = src_frame->linesize[0];
01443 pict_src.linesize[1] = src_frame->linesize[1];
01444 pict_src.linesize[2] = src_frame->linesize[2];
01445
01446
01447 av_picture_copy(&pict, &pict_src,
01448 vp->pix_fmt, vp->width, vp->height);
01449 #else
01450 sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
01451 is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
01452 vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
01453 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
01454 if (is->img_convert_ctx == NULL) {
01455 fprintf(stderr, "Cannot initialize the conversion context\n");
01456 exit(1);
01457 }
01458 sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
01459 0, vp->height, pict.data, pict.linesize);
01460 #endif
01461
01462 SDL_UnlockYUVOverlay(vp->bmp);
01463
01464 vp->pts = pts;
01465 vp->pos = pos;
01466
01467
01468 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
01469 is->pictq_windex = 0;
01470 SDL_LockMutex(is->pictq_mutex);
01471 vp->target_clock= compute_target_time(vp->pts, is);
01472
01473 is->pictq_size++;
01474 SDL_UnlockMutex(is->pictq_mutex);
01475 }
01476 return 0;
01477 }
01478
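/**
 * compute the presentation time of the picture, extrapolating from the video
 * clock when the stream does not provide one, then queue it for display
 */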
01483 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
01484 {
01485 double frame_delay, pts;
01486
01487 pts = pts1;
01488
01489 if (pts != 0) {
01490
01491 is->video_clock = pts;
01492 } else {
01493 pts = is->video_clock;
01494 }
01495
01496 frame_delay = av_q2d(is->video_st->codec->time_base);
01497
01498
01499 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
01500 is->video_clock += frame_delay;
01501
01502 #if defined(DEBUG_SYNC) && 0
01503 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
01504 av_get_pict_type_char(src_frame->pict_type), pts, pts1);
01505 #endif
01506 return queue_picture(is, src_frame, pts, pos);
01507 }
01508
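/* read the next video packet and decode it; returns 1 when a frame is ready to be queued, 0 when it was skipped or flushed, < 0 on abort */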
01509 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
01510 {
01511 int len1, got_picture, i;
01512
01513 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
01514 return -1;
01515
01516 if (pkt->data == flush_pkt.data) {
01517 avcodec_flush_buffers(is->video_st->codec);
01518
01519 SDL_LockMutex(is->pictq_mutex);
01520
01521 for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
01522 is->pictq[i].target_clock= 0;
01523 }
01524 while (is->pictq_size && !is->videoq.abort_request) {
01525 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
01526 }
01527 is->video_current_pos = -1;
01528 SDL_UnlockMutex(is->pictq_mutex);
01529
01530 init_pts_correction(&is->pts_ctx);
01531 is->frame_last_pts = AV_NOPTS_VALUE;
01532 is->frame_last_delay = 0;
01533 is->frame_timer = (double)av_gettime() / 1000000.0;
01534 is->skip_frames = 1;
01535 is->skip_frames_index = 0;
01536 return 0;
01537 }
01538
01539 len1 = avcodec_decode_video2(is->video_st->codec,
01540 frame, &got_picture,
01541 pkt);
01542
01543 if (got_picture) {
01544 if (decoder_reorder_pts == -1) {
01545 *pts = guess_correct_pts(&is->pts_ctx, frame->pkt_pts, frame->pkt_dts);
01546 } else if (decoder_reorder_pts) {
01547 *pts = frame->pkt_pts;
01548 } else {
01549 *pts = frame->pkt_dts;
01550 }
01551
01552 if (*pts == AV_NOPTS_VALUE) {
01553 *pts = 0;
01554 }
01555
01556 is->skip_frames_index += 1;
01557 if(is->skip_frames_index >= is->skip_frames){
01558 is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
01559 return 1;
01560 }
01561
01562 }
01563 return 0;
01564 }
01565
01566 #if CONFIG_AVFILTER
01567 typedef struct {
01568 VideoState *is;
01569 AVFrame *frame;
01570 int use_dr1;
01571 } FilterPriv;
01572
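/* direct-rendering get_buffer(): let the decoder write into a buffer owned by the filter graph so that decoded frames need not be copied */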
01573 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
01574 {
01575 AVFilterContext *ctx = codec->opaque;
01576 AVFilterBufferRef *ref;
01577 int perms = AV_PERM_WRITE;
01578 int i, w, h, stride[4];
01579 unsigned edge;
01580
01581 if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
01582 perms |= AV_PERM_NEG_LINESIZES;
01583
01584 if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
01585 if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
01586 if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
01587 if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
01588 }
01589 if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
01590
01591 w = codec->width;
01592 h = codec->height;
01593 avcodec_align_dimensions2(codec, &w, &h, stride);
01594 edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
01595 w += edge << 1;
01596 h += edge << 1;
01597
01598 if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
01599 return -1;
01600
01601 ref->video->w = codec->width;
01602 ref->video->h = codec->height;
01603 for(i = 0; i < 4; i ++) {
01604 unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
01605 unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
01606
01607 if (ref->data[i]) {
01608 ref->data[i] += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
01609 }
01610 pic->data[i] = ref->data[i];
01611 pic->linesize[i] = ref->linesize[i];
01612 }
01613 pic->opaque = ref;
01614 pic->age = INT_MAX;
01615 pic->type = FF_BUFFER_TYPE_USER;
01616 pic->reordered_opaque = codec->reordered_opaque;
01617 if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
01618 else pic->pkt_pts = AV_NOPTS_VALUE;
01619 return 0;
01620 }
01621
01622 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
01623 {
01624 memset(pic->data, 0, sizeof(pic->data));
01625 avfilter_unref_buffer(pic->opaque);
01626 }
01627
01628 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
01629 {
01630 AVFilterBufferRef *ref = pic->opaque;
01631
01632 if (pic->data[0] == NULL) {
01633 pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
01634 return codec->get_buffer(codec, pic);
01635 }
01636
01637 if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
01638 (codec->pix_fmt != ref->format)) {
01639 av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
01640 return -1;
01641 }
01642
01643 pic->reordered_opaque = codec->reordered_opaque;
01644 if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
01645 else pic->pkt_pts = AV_NOPTS_VALUE;
01646 return 0;
01647 }
01648
01649 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
01650 {
01651 FilterPriv *priv = ctx->priv;
01652 AVCodecContext *codec;
01653 if(!opaque) return -1;
01654
01655 priv->is = opaque;
01656 codec = priv->is->video_st->codec;
01657 codec->opaque = ctx;
01658 if(codec->codec->capabilities & CODEC_CAP_DR1) {
01659 priv->use_dr1 = 1;
01660 codec->get_buffer = input_get_buffer;
01661 codec->release_buffer = input_release_buffer;
01662 codec->reget_buffer = input_reget_buffer;
01663 codec->thread_safe_callbacks = 1;
01664 }
01665
01666 priv->frame = avcodec_alloc_frame();
01667
01668 return 0;
01669 }
01670
01671 static void input_uninit(AVFilterContext *ctx)
01672 {
01673 FilterPriv *priv = ctx->priv;
01674 av_free(priv->frame);
01675 }
01676
01677 static int input_request_frame(AVFilterLink *link)
01678 {
01679 FilterPriv *priv = link->src->priv;
01680 AVFilterBufferRef *picref;
01681 int64_t pts = 0;
01682 AVPacket pkt;
01683 int ret;
01684
01685 while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
01686 av_free_packet(&pkt);
01687 if (ret < 0)
01688 return -1;
01689
01690 if(priv->use_dr1) {
01691 picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
01692 } else {
01693 picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
01694 av_image_copy(picref->data, picref->linesize,
01695 priv->frame->data, priv->frame->linesize,
01696 picref->format, link->w, link->h);
01697 }
01698 av_free_packet(&pkt);
01699
01700 picref->pts = pts;
01701 picref->pos = pkt.pos;
01702 picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
01703 avfilter_start_frame(link, picref);
01704 avfilter_draw_slice(link, 0, link->h, 1);
01705 avfilter_end_frame(link);
01706
01707 return 0;
01708 }
01709
01710 static int input_query_formats(AVFilterContext *ctx)
01711 {
01712 FilterPriv *priv = ctx->priv;
01713 enum PixelFormat pix_fmts[] = {
01714 priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
01715 };
01716
01717 avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
01718 return 0;
01719 }
01720
01721 static int input_config_props(AVFilterLink *link)
01722 {
01723 FilterPriv *priv = link->src->priv;
01724 AVCodecContext *c = priv->is->video_st->codec;
01725
01726 link->w = c->width;
01727 link->h = c->height;
01728 link->time_base = priv->is->video_st->time_base;
01729
01730 return 0;
01731 }
01732
01733 static AVFilter input_filter =
01734 {
01735 .name = "ffplay_input",
01736
01737 .priv_size = sizeof(FilterPriv),
01738
01739 .init = input_init,
01740 .uninit = input_uninit,
01741
01742 .query_formats = input_query_formats,
01743
01744 .inputs = (AVFilterPad[]) {{ .name = NULL }},
01745 .outputs = (AVFilterPad[]) {{ .name = "default",
01746 .type = AVMEDIA_TYPE_VIDEO,
01747 .request_frame = input_request_frame,
01748 .config_props = input_config_props, },
01749 { .name = NULL }},
01750 };
01751
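/* build the filter graph: ffplay_input source -> optional user chain (-vf) -> ffsink */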
01752 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
01753 {
01754 char sws_flags_str[128];
01755 int ret;
01756 FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
01757 AVFilterContext *filt_src = NULL, *filt_out = NULL;
01758 snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
01759 graph->scale_sws_opts = av_strdup(sws_flags_str);
01760
01761 if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
01762 NULL, is, graph)) < 0)
01763 goto the_end;
01764 if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
01765 NULL, &ffsink_ctx, graph)) < 0)
01766 goto the_end;
01767
01768 if(vfilters) {
01769 AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
01770 AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
01771
01772 outputs->name = av_strdup("in");
01773 outputs->filter_ctx = filt_src;
01774 outputs->pad_idx = 0;
01775 outputs->next = NULL;
01776
01777 inputs->name = av_strdup("out");
01778 inputs->filter_ctx = filt_out;
01779 inputs->pad_idx = 0;
01780 inputs->next = NULL;
01781
01782 if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
01783 goto the_end;
01784 av_freep(&vfilters);
01785 } else {
01786 if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
01787 goto the_end;
01788 }
01789
01790 if ((ret = avfilter_graph_config(graph, NULL)) < 0)
01791 goto the_end;
01792
01793 is->out_video_filter = filt_out;
01794 the_end:
01795 return ret;
01796 }
01797
01798 #endif
01799
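/* video decoding thread: pulls packets (or filtered frames when CONFIG_AVFILTER is set) and queues pictures for display */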
01800 static int video_thread(void *arg)
01801 {
01802 VideoState *is = arg;
01803 AVFrame *frame= avcodec_alloc_frame();
01804 int64_t pts_int;
01805 double pts;
01806 int ret;
01807
01808 #if CONFIG_AVFILTER
01809 AVFilterGraph *graph = avfilter_graph_alloc();
01810 AVFilterContext *filt_out = NULL;
01811 int64_t pos;
01812
01813 if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
01814 goto the_end;
01815 filt_out = is->out_video_filter;
01816 #endif
01817
01818 for(;;) {
01819 #if !CONFIG_AVFILTER
01820 AVPacket pkt;
01821 #else
01822 AVFilterBufferRef *picref;
01823 AVRational tb;
01824 #endif
01825 while (is->paused && !is->videoq.abort_request)
01826 SDL_Delay(10);
01827 #if CONFIG_AVFILTER
01828 ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
01829 if (picref) {
01830 pts_int = picref->pts;
01831 pos = picref->pos;
01832 frame->opaque = picref;
01833 }
01834
01835 if (av_cmp_q(tb, is->video_st->time_base)) {
01836 av_unused int64_t pts1 = pts_int;
01837 pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
01838 av_dlog(NULL, "video_thread(): "
01839 "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
01840 tb.num, tb.den, pts1,
01841 is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
01842 }
01843 #else
01844 ret = get_video_frame(is, frame, &pts_int, &pkt);
01845 #endif
01846
01847 if (ret < 0) goto the_end;
01848
01849 if (!ret)
01850 continue;
01851
01852 pts = pts_int*av_q2d(is->video_st->time_base);
01853
01854 #if CONFIG_AVFILTER
01855 ret = output_picture2(is, frame, pts, pos);
01856 #else
01857 ret = output_picture2(is, frame, pts, pkt.pos);
01858 av_free_packet(&pkt);
01859 #endif
01860 if (ret < 0)
01861 goto the_end;
01862
01863 if (step)
01864 if (cur_stream)
01865 stream_pause(cur_stream);
01866 }
01867 the_end:
01868 #if CONFIG_AVFILTER
01869 avfilter_graph_free(&graph);
01870 #endif
01871 av_free(frame);
01872 return 0;
01873 }
01874
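/* subtitle decoding thread: decodes packets from subtitleq, converts the palette from RGBA to YUVA and queues the result in subpq */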
01875 static int subtitle_thread(void *arg)
01876 {
01877 VideoState *is = arg;
01878 SubPicture *sp;
01879 AVPacket pkt1, *pkt = &pkt1;
01880 int len1, got_subtitle;
01881 double pts;
01882 int i, j;
01883 int r, g, b, y, u, v, a;
01884
01885 for(;;) {
01886 while (is->paused && !is->subtitleq.abort_request) {
01887 SDL_Delay(10);
01888 }
01889 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
01890 break;
01891
01892 if(pkt->data == flush_pkt.data){
01893 avcodec_flush_buffers(is->subtitle_st->codec);
01894 continue;
01895 }
01896 SDL_LockMutex(is->subpq_mutex);
01897 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
01898 !is->subtitleq.abort_request) {
01899 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
01900 }
01901 SDL_UnlockMutex(is->subpq_mutex);
01902
01903 if (is->subtitleq.abort_request)
01904 goto the_end;
01905
01906 sp = &is->subpq[is->subpq_windex];
01907
01908
01909
01910 pts = 0;
01911 if (pkt->pts != AV_NOPTS_VALUE)
01912 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
01913
01914 len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
01915 &sp->sub, &got_subtitle,
01916 pkt);
01917
01918
01919 if (got_subtitle && sp->sub.format == 0) {
01920 sp->pts = pts;
01921
01922 for (i = 0; i < sp->sub.num_rects; i++)
01923 {
01924 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
01925 {
01926 RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
01927 y = RGB_TO_Y_CCIR(r, g, b);
01928 u = RGB_TO_U_CCIR(r, g, b, 0);
01929 v = RGB_TO_V_CCIR(r, g, b, 0);
01930 YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
01931 }
01932 }
01933
01934
01935 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
01936 is->subpq_windex = 0;
01937 SDL_LockMutex(is->subpq_mutex);
01938 is->subpq_size++;
01939 SDL_UnlockMutex(is->subpq_mutex);
01940 }
01941 av_free_packet(pkt);
01942
01943
01944
01945 }
01946 the_end:
01947 return 0;
01948 }
01949
01950
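/* copy decoded samples into the circular sample_array used by the waveform/spectrum display */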
01951 static void update_sample_display(VideoState *is, short *samples, int samples_size)
01952 {
01953 int size, len, channels;
01954
01955 channels = is->audio_st->codec->channels;
01956
01957 size = samples_size / sizeof(short);
01958 while (size > 0) {
01959 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
01960 if (len > size)
01961 len = size;
01962 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
01963 samples += len;
01964 is->sample_array_index += len;
01965 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
01966 is->sample_array_index = 0;
01967 size -= len;
01968 }
01969 }
01970
01971
01972
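/* return the new audio buffer size: samples are added or dropped to get better
 * sync when video or the external clock is the master. The A-V difference is
 * smoothed with an exponential average,
 *   audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum
 *   avg_diff       = audio_diff_cum * (1 - audio_diff_avg_coef)
 * and a correction is applied only once avg_diff exceeds audio_diff_threshold. */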
01973 static int synchronize_audio(VideoState *is, short *samples,
01974 int samples_size1, double pts)
01975 {
01976 int n, samples_size;
01977 double ref_clock;
01978
01979 n = 2 * is->audio_st->codec->channels;
01980 samples_size = samples_size1;
01981
01982
01983 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
01984 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
01985 double diff, avg_diff;
01986 int wanted_size, min_size, max_size, nb_samples;
01987
01988 ref_clock = get_master_clock(is);
01989 diff = get_audio_clock(is) - ref_clock;
01990
01991 if (diff < AV_NOSYNC_THRESHOLD) {
01992 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
01993 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
01994
01995 is->audio_diff_avg_count++;
01996 } else {
01997
01998 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
01999
02000 if (fabs(avg_diff) >= is->audio_diff_threshold) {
02001 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
02002 nb_samples = samples_size / n;
02003
02004 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
02005 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
02006 if (wanted_size < min_size)
02007 wanted_size = min_size;
02008 else if (wanted_size > max_size)
02009 wanted_size = max_size;
02010
02011
02012 if (wanted_size < samples_size) {
02013
02014 samples_size = wanted_size;
02015 } else if (wanted_size > samples_size) {
02016 uint8_t *samples_end, *q;
02017 int nb;
02018
02019 /* add samples by duplicating the final sample */
02020 nb = (wanted_size - samples_size);
02021 samples_end = (uint8_t *)samples + samples_size - n;
02022 q = samples_end + n;
02023 while (nb > 0) {
02024 memcpy(q, samples_end, n);
02025 q += n;
02026 nb -= n;
02027 }
02028 samples_size = wanted_size;
02029 }
02030 }
02031 #if 0
02032 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
02033 diff, avg_diff, samples_size - samples_size1,
02034 is->audio_clock, is->video_clock, is->audio_diff_threshold);
02035 #endif
02036 }
02037 } else {
02038
02039
02040 is->audio_diff_avg_count = 0;
02041 is->audio_diff_cum = 0;
02042 }
02043 }
02044
02045 return samples_size;
02046 }
02047
02048
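/* decode one audio frame into is->audio_buf and return its uncompressed size in bytes; also updates is->audio_clock */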
02049 static int audio_decode_frame(VideoState *is, double *pts_ptr)
02050 {
02051 AVPacket *pkt_temp = &is->audio_pkt_temp;
02052 AVPacket *pkt = &is->audio_pkt;
02053 AVCodecContext *dec= is->audio_st->codec;
02054 int n, len1, data_size;
02055 double pts;
02056
02057 for(;;) {
02058
02059 while (pkt_temp->size > 0) {
02060 data_size = sizeof(is->audio_buf1);
02061 len1 = avcodec_decode_audio3(dec,
02062 (int16_t *)is->audio_buf1, &data_size,
02063 pkt_temp);
02064 if (len1 < 0) {
02065
02066 pkt_temp->size = 0;
02067 break;
02068 }
02069
02070 pkt_temp->data += len1;
02071 pkt_temp->size -= len1;
02072 if (data_size <= 0)
02073 continue;
02074
02075 if (dec->sample_fmt != is->audio_src_fmt) {
02076 if (is->reformat_ctx)
02077 av_audio_convert_free(is->reformat_ctx);
02078 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
02079 dec->sample_fmt, 1, NULL, 0);
02080 if (!is->reformat_ctx) {
02081 fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
02082 av_get_sample_fmt_name(dec->sample_fmt),
02083 av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
02084 break;
02085 }
02086 is->audio_src_fmt= dec->sample_fmt;
02087 }
02088
02089 if (is->reformat_ctx) {
02090 const void *ibuf[6]= {is->audio_buf1};
02091 void *obuf[6]= {is->audio_buf2};
02092 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
02093 int ostride[6]= {2};
02094 int len= data_size/istride[0];
02095 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
02096 printf("av_audio_convert() failed\n");
02097 break;
02098 }
02099 is->audio_buf= is->audio_buf2;
02100
02101
02102 data_size= len*2;
02103 }else{
02104 is->audio_buf= is->audio_buf1;
02105 }
02106
02107
02108 pts = is->audio_clock;
02109 *pts_ptr = pts;
02110 n = 2 * dec->channels;
02111 is->audio_clock += (double)data_size /
02112 (double)(n * dec->sample_rate);
02113 #if defined(DEBUG_SYNC)
02114 {
02115 static double last_clock;
02116 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
02117 is->audio_clock - last_clock,
02118 is->audio_clock, pts);
02119 last_clock = is->audio_clock;
02120 }
02121 #endif
02122 return data_size;
02123 }
02124
02125
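/* free the previous packet before fetching a new one */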
02126 if (pkt->data)
02127 av_free_packet(pkt);
02128
02129 if (is->paused || is->audioq.abort_request) {
02130 return -1;
02131 }
02132
02133
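/* read the next packet from the audio queue */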
02134 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
02135 return -1;
02136 if(pkt->data == flush_pkt.data){
02137 avcodec_flush_buffers(dec);
02138 continue;
02139 }
02140
02141 pkt_temp->data = pkt->data;
02142 pkt_temp->size = pkt->size;
02143
02144
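/* if the packet carries a pts, use it to resynchronize the audio clock */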
02145 if (pkt->pts != AV_NOPTS_VALUE) {
02146 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
02147 }
02148 }
02149 }
02150
02151
02152
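/* bytes left in is->audio_buf that have not yet been copied to the SDL stream */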
02153 static int audio_write_get_buf_size(VideoState *is)
02154 {
02155 return is->audio_buf_size - is->audio_buf_index;
02156 }
02157
02158
02159
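/* SDL audio callback: fills 'stream' with 'len' bytes of decoded and
   resynchronized audio, or with silence on error */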
02160 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
02161 {
02162 VideoState *is = opaque;
02163 int audio_size, len1;
02164 double pts;
02165
02166 audio_callback_time = av_gettime();
02167
02168 while (len > 0) {
02169 if (is->audio_buf_index >= is->audio_buf_size) {
02170 audio_size = audio_decode_frame(is, &pts);
02171 if (audio_size < 0) {
02172
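/* decoding failed or playback is paused: output silence */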
02173 is->audio_buf = is->audio_buf1;
02174 is->audio_buf_size = 1024;
02175 memset(is->audio_buf, 0, is->audio_buf_size);
02176 } else {
02177 if (is->show_audio)
02178 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
02179 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
02180 pts);
02181 is->audio_buf_size = audio_size;
02182 }
02183 is->audio_buf_index = 0;
02184 }
02185 len1 = is->audio_buf_size - is->audio_buf_index;
02186 if (len1 > len)
02187 len1 = len;
02188 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
02189 len -= len1;
02190 stream += len1;
02191 is->audio_buf_index += len1;
02192 }
02193 }
02194
02195
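/* open the stream component at 'stream_index' (codec, packet queue and, for
   video/subtitles, the decoding thread); returns 0 on success, -1 on error */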
02196 static int stream_component_open(VideoState *is, int stream_index)
02197 {
02198 AVFormatContext *ic = is->ic;
02199 AVCodecContext *avctx;
02200 AVCodec *codec;
02201 SDL_AudioSpec wanted_spec, spec;
02202
02203 if (stream_index < 0 || stream_index >= ic->nb_streams)
02204 return -1;
02205 avctx = ic->streams[stream_index]->codec;
02206
02207
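/* for audio, request at most two channels from the decoder */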
02208 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02209 if (avctx->channels > 0) {
02210 avctx->request_channels = FFMIN(2, avctx->channels);
02211 } else {
02212 avctx->request_channels = 2;
02213 }
02214 }
02215
02216 codec = avcodec_find_decoder(avctx->codec_id);
02217 avctx->debug_mv = debug_mv;
02218 avctx->debug = debug;
02219 avctx->workaround_bugs = workaround_bugs;
02220 avctx->lowres = lowres;
02221 if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
02222 avctx->idct_algo= idct;
02223 if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
02224 avctx->skip_frame= skip_frame;
02225 avctx->skip_idct= skip_idct;
02226 avctx->skip_loop_filter= skip_loop_filter;
02227 avctx->error_recognition= error_recognition;
02228 avctx->error_concealment= error_concealment;
02229 avctx->thread_count= thread_count;
02230
02231 set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
02232
02233 if (!codec ||
02234 avcodec_open(avctx, codec) < 0)
02235 return -1;
02236
02237
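/* audio stream: open the SDL audio device */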
02238 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
02239 wanted_spec.freq = avctx->sample_rate;
02240 wanted_spec.format = AUDIO_S16SYS;
02241 wanted_spec.channels = avctx->channels;
02242 wanted_spec.silence = 0;
02243 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
02244 wanted_spec.callback = sdl_audio_callback;
02245 wanted_spec.userdata = is;
02246 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
02247 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
02248 return -1;
02249 }
02250 is->audio_hw_buf_size = spec.size;
02251 is->audio_src_fmt= AV_SAMPLE_FMT_S16;
02252 }
02253
02254 ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
02255 switch(avctx->codec_type) {
02256 case AVMEDIA_TYPE_AUDIO:
02257 is->audio_stream = stream_index;
02258 is->audio_st = ic->streams[stream_index];
02259 is->audio_buf_size = 0;
02260 is->audio_buf_index = 0;
02261
02262
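/* init averaging filter: the weight of a measurement decays to 1% after AUDIO_DIFF_AVG_NB samples */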
02263 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
02264 is->audio_diff_avg_count = 0;
02265
02266
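/* the exact audio FIFO fullness is not known, so only correct drifts larger than this threshold */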
02267 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
02268
02269 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
02270 packet_queue_init(&is->audioq);
02271 SDL_PauseAudio(0);
02272 break;
02273 case AVMEDIA_TYPE_VIDEO:
02274 is->video_stream = stream_index;
02275 is->video_st = ic->streams[stream_index];
02276
02277
02278
02279 packet_queue_init(&is->videoq);
02280 is->video_tid = SDL_CreateThread(video_thread, is);
02281 break;
02282 case AVMEDIA_TYPE_SUBTITLE:
02283 is->subtitle_stream = stream_index;
02284 is->subtitle_st = ic->streams[stream_index];
02285 packet_queue_init(&is->subtitleq);
02286
02287 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
02288 break;
02289 default:
02290 break;
02291 }
02292 return 0;
02293 }
02294
02295 static void stream_component_close(VideoState *is, int stream_index)
02296 {
02297 AVFormatContext *ic = is->ic;
02298 AVCodecContext *avctx;
02299
02300 if (stream_index < 0 || stream_index >= ic->nb_streams)
02301 return;
02302 avctx = ic->streams[stream_index]->codec;
02303
02304 switch(avctx->codec_type) {
02305 case AVMEDIA_TYPE_AUDIO:
02306 packet_queue_abort(&is->audioq);
02307
02308 SDL_CloseAudio();
02309
02310 packet_queue_end(&is->audioq);
02311 if (is->reformat_ctx)
02312 av_audio_convert_free(is->reformat_ctx);
02313 is->reformat_ctx = NULL;
02314 break;
02315 case AVMEDIA_TYPE_VIDEO:
02316 packet_queue_abort(&is->videoq);
02317
02318
02319
02320 SDL_LockMutex(is->pictq_mutex);
02321 SDL_CondSignal(is->pictq_cond);
02322 SDL_UnlockMutex(is->pictq_mutex);
02323
02324 SDL_WaitThread(is->video_tid, NULL);
02325
02326 packet_queue_end(&is->videoq);
02327 break;
02328 case AVMEDIA_TYPE_SUBTITLE:
02329 packet_queue_abort(&is->subtitleq);
02330
02331
02332
02333 SDL_LockMutex(is->subpq_mutex);
02334 is->subtitle_stream_changed = 1;
02335
02336 SDL_CondSignal(is->subpq_cond);
02337 SDL_UnlockMutex(is->subpq_mutex);
02338
02339 SDL_WaitThread(is->subtitle_tid, NULL);
02340
02341 packet_queue_end(&is->subtitleq);
02342 break;
02343 default:
02344 break;
02345 }
02346
02347 ic->streams[stream_index]->discard = AVDISCARD_ALL;
02348 avcodec_close(avctx);
02349 switch(avctx->codec_type) {
02350 case AVMEDIA_TYPE_AUDIO:
02351 is->audio_st = NULL;
02352 is->audio_stream = -1;
02353 break;
02354 case AVMEDIA_TYPE_VIDEO:
02355 is->video_st = NULL;
02356 is->video_stream = -1;
02357 break;
02358 case AVMEDIA_TYPE_SUBTITLE:
02359 is->subtitle_st = NULL;
02360 is->subtitle_stream = -1;
02361 break;
02362 default:
02363 break;
02364 }
02365 }
02366
02367
02368
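/* there is a single decode thread, so a plain global is enough for the interrupt callback */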
02369 static VideoState *global_video_state;
02370
02371 static int decode_interrupt_cb(void)
02372 {
02373 return (global_video_state && global_video_state->abort_request);
02374 }
02375
02376
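/* this thread reads the stream from disk or network and feeds the packet queues */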
02377 static int decode_thread(void *arg)
02378 {
02379 VideoState *is = arg;
02380 AVFormatContext *ic;
02381 int err, i, ret;
02382 int st_index[AVMEDIA_TYPE_NB];
02383 AVPacket pkt1, *pkt = &pkt1;
02384 AVFormatParameters params, *ap = &params;
02385 int eof=0;
02386 int pkt_in_play_range = 0;
02387
02388 ic = avformat_alloc_context();
02389
02390 memset(st_index, -1, sizeof(st_index));
02391 is->video_stream = -1;
02392 is->audio_stream = -1;
02393 is->subtitle_stream = -1;
02394
02395 global_video_state = is;
02396 url_set_interrupt_cb(decode_interrupt_cb);
02397
02398 memset(ap, 0, sizeof(*ap));
02399
02400 ap->prealloced_context = 1;
02401 ap->width = frame_width;
02402 ap->height= frame_height;
02403 ap->time_base= (AVRational){1, 25};
02404 ap->pix_fmt = frame_pix_fmt;
02405
02406 set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
02407
02408 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
02409 if (err < 0) {
02410 print_error(is->filename, err);
02411 ret = -1;
02412 goto fail;
02413 }
02414 is->ic = ic;
02415
02416 if(genpts)
02417 ic->flags |= AVFMT_FLAG_GENPTS;
02418
02419 err = av_find_stream_info(ic);
02420 if (err < 0) {
02421 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
02422 ret = -1;
02423 goto fail;
02424 }
02425 if(ic->pb)
02426 ic->pb->eof_reached= 0;
02427
02428 if(seek_by_bytes<0)
02429 seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
02430
02431
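/* if a start time was requested, seek to it; ic->start_time is added because
   the seek target is in absolute AV_TIME_BASE units */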
02432 if (start_time != AV_NOPTS_VALUE) {
02433 int64_t timestamp;
02434
02435 timestamp = start_time;
02436
02437 if (ic->start_time != AV_NOPTS_VALUE)
02438 timestamp += ic->start_time;
02439 ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
02440 if (ret < 0) {
02441 fprintf(stderr, "%s: could not seek to position %0.3f\n",
02442 is->filename, (double)timestamp / AV_TIME_BASE);
02443 }
02444 }
02445
02446 for (i = 0; i < ic->nb_streams; i++)
02447 ic->streams[i]->discard = AVDISCARD_ALL;
02448 if (!video_disable)
02449 st_index[AVMEDIA_TYPE_VIDEO] =
02450 av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
02451 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
02452 if (!audio_disable)
02453 st_index[AVMEDIA_TYPE_AUDIO] =
02454 av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
02455 wanted_stream[AVMEDIA_TYPE_AUDIO],
02456 st_index[AVMEDIA_TYPE_VIDEO],
02457 NULL, 0);
02458 if (!video_disable)
02459 st_index[AVMEDIA_TYPE_SUBTITLE] =
02460 av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
02461 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
02462 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
02463 st_index[AVMEDIA_TYPE_AUDIO] :
02464 st_index[AVMEDIA_TYPE_VIDEO]),
02465 NULL, 0);
02466 if (show_status) {
02467 av_dump_format(ic, 0, is->filename, 0);
02468 }
02469
02470
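/* open the selected stream components */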
02471 if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
02472 stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
02473 }
02474
02475 ret=-1;
02476 if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
02477 ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
02478 }
02479 is->refresh_tid = SDL_CreateThread(refresh_thread, is);
02480 if(ret<0) {
02481 if (!display_disable)
02482 is->show_audio = 2;
02483 }
02484
02485 if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
02486 stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
02487 }
02488
02489 if (is->video_stream < 0 && is->audio_stream < 0) {
02490 fprintf(stderr, "%s: could not open codecs\n", is->filename);
02491 ret = -1;
02492 goto fail;
02493 }
02494
02495 for(;;) {
02496 if (is->abort_request)
02497 break;
02498 if (is->paused != is->last_paused) {
02499 is->last_paused = is->paused;
02500 if (is->paused)
02501 is->read_pause_return= av_read_pause(ic);
02502 else
02503 av_read_play(ic);
02504 }
02505 #if CONFIG_RTSP_DEMUXER
02506 if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
02507
02508
02509 SDL_Delay(10);
02510 continue;
02511 }
02512 #endif
02513 if (is->seek_req) {
02514 int64_t seek_target= is->seek_pos;
02515 int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
02516 int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
02517
02518
02519
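/* the +-2 on seek_min/seek_max is believed to compensate for rounding in the
   computation of seek_pos/seek_rel */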
02520 ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
02521 if (ret < 0) {
02522 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
02523 }else{
02524 if (is->audio_stream >= 0) {
02525 packet_queue_flush(&is->audioq);
02526 packet_queue_put(&is->audioq, &flush_pkt);
02527 }
02528 if (is->subtitle_stream >= 0) {
02529 packet_queue_flush(&is->subtitleq);
02530 packet_queue_put(&is->subtitleq, &flush_pkt);
02531 }
02532 if (is->video_stream >= 0) {
02533 packet_queue_flush(&is->videoq);
02534 packet_queue_put(&is->videoq, &flush_pkt);
02535 }
02536 }
02537 is->seek_req = 0;
02538 eof= 0;
02539 }
02540
02541
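/* if the queues are full, no need to read more for now */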
02542 if ( is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
02543 || ( (is->audioq .size > MIN_AUDIOQ_SIZE || is->audio_stream<0)
02544 && (is->videoq .nb_packets > MIN_FRAMES || is->video_stream<0)
02545 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
02546
02547 SDL_Delay(10);
02548 continue;
02549 }
02550 if(eof) {
02551 if(is->video_stream >= 0){
02552 av_init_packet(pkt);
02553 pkt->data=NULL;
02554 pkt->size=0;
02555 pkt->stream_index= is->video_stream;
02556 packet_queue_put(&is->videoq, pkt);
02557 }
02558 SDL_Delay(10);
02559 if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
02560 if(loop!=1 && (!loop || --loop)){
02561 stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
02562 }else if(autoexit){
02563 ret=AVERROR_EOF;
02564 goto fail;
02565 }
02566 }
02567 continue;
02568 }
02569 ret = av_read_frame(ic, pkt);
02570 if (ret < 0) {
02571 if (ret == AVERROR_EOF || url_feof(ic->pb))
02572 eof=1;
02573 if (url_ferror(ic->pb))
02574 break;
02575 SDL_Delay(100);
02576 continue;
02577 }
02578
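/* queue the packet only if it lies within the user-requested play range (-ss/-t), otherwise discard it */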
02579 pkt_in_play_range = duration == AV_NOPTS_VALUE ||
02580 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
02581 av_q2d(ic->streams[pkt->stream_index]->time_base) -
02582 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
02583 <= ((double)duration/1000000);
02584 if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
02585 packet_queue_put(&is->audioq, pkt);
02586 } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
02587 packet_queue_put(&is->videoq, pkt);
02588 } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
02589 packet_queue_put(&is->subtitleq, pkt);
02590 } else {
02591 av_free_packet(pkt);
02592 }
02593 }
02594
02595 while (!is->abort_request) {
02596 SDL_Delay(100);
02597 }
02598
02599 ret = 0;
02600 fail:
02601
02602 global_video_state = NULL;
02603
02604
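/* close every stream component that was opened */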
02605 if (is->audio_stream >= 0)
02606 stream_component_close(is, is->audio_stream);
02607 if (is->video_stream >= 0)
02608 stream_component_close(is, is->video_stream);
02609 if (is->subtitle_stream >= 0)
02610 stream_component_close(is, is->subtitle_stream);
02611 if (is->ic) {
02612 av_close_input_file(is->ic);
02613 is->ic = NULL;
02614 }
02615 url_set_interrupt_cb(NULL);
02616
02617 if (ret != 0) {
02618 SDL_Event event;
02619
02620 event.type = FF_QUIT_EVENT;
02621 event.user.data1 = is;
02622 SDL_PushEvent(&event);
02623 }
02624 return 0;
02625 }
02626
02627 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
02628 {
02629 VideoState *is;
02630
02631 is = av_mallocz(sizeof(VideoState));
02632 if (!is)
02633 return NULL;
02634 av_strlcpy(is->filename, filename, sizeof(is->filename));
02635 is->iformat = iformat;
02636 is->ytop = 0;
02637 is->xleft = 0;
02638
02639
02640 is->pictq_mutex = SDL_CreateMutex();
02641 is->pictq_cond = SDL_CreateCond();
02642
02643 is->subpq_mutex = SDL_CreateMutex();
02644 is->subpq_cond = SDL_CreateCond();
02645
02646 is->av_sync_type = av_sync_type;
02647 is->parse_tid = SDL_CreateThread(decode_thread, is);
02648 if (!is->parse_tid) {
02649 av_free(is);
02650 return NULL;
02651 }
02652 return is;
02653 }
02654
02655 static void stream_cycle_channel(VideoState *is, int codec_type)
02656 {
02657 AVFormatContext *ic = is->ic;
02658 int start_index, stream_index;
02659 AVStream *st;
02660
02661 if (codec_type == AVMEDIA_TYPE_VIDEO)
02662 start_index = is->video_stream;
02663 else if (codec_type == AVMEDIA_TYPE_AUDIO)
02664 start_index = is->audio_stream;
02665 else
02666 start_index = is->subtitle_stream;
02667 if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
02668 return;
02669 stream_index = start_index;
02670 for(;;) {
02671 if (++stream_index >= is->ic->nb_streams)
02672 {
02673 if (codec_type == AVMEDIA_TYPE_SUBTITLE)
02674 {
02675 stream_index = -1;
02676 goto the_end;
02677 } else
02678 stream_index = 0;
02679 }
02680 if (stream_index == start_index)
02681 return;
02682 st = ic->streams[stream_index];
02683 if (st->codec->codec_type == codec_type) {
02684
02685 switch(codec_type) {
02686 case AVMEDIA_TYPE_AUDIO:
02687 if (st->codec->sample_rate != 0 &&
02688 st->codec->channels != 0)
02689 goto the_end;
02690 break;
02691 case AVMEDIA_TYPE_VIDEO:
02692 case AVMEDIA_TYPE_SUBTITLE:
02693 goto the_end;
02694 default:
02695 break;
02696 }
02697 }
02698 }
02699 the_end:
02700 stream_component_close(is, start_index);
02701 stream_component_open(is, stream_index);
02702 }
02703
02704
02705 static void toggle_full_screen(void)
02706 {
02707 is_full_screen = !is_full_screen;
02708 if (!fs_screen_width) {
02709
02710
02711 }
02712 video_open(cur_stream);
02713 }
02714
02715 static void toggle_pause(void)
02716 {
02717 if (cur_stream)
02718 stream_pause(cur_stream);
02719 step = 0;
02720 }
02721
02722 static void step_to_next_frame(void)
02723 {
02724 if (cur_stream) {
02725
02726 if (cur_stream->paused)
02727 stream_pause(cur_stream);
02728 }
02729 step = 1;
02730 }
02731
02732 static void toggle_audio_display(void)
02733 {
02734 if (cur_stream) {
02735 int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
02736 cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
02737 fill_rectangle(screen,
02738 cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
02739 bgcolor);
02740 SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
02741 }
02742 }
02743
02744
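/* handle events coming from SDL and from the decoding threads */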
02745 static void event_loop(void)
02746 {
02747 SDL_Event event;
02748 double incr, pos, frac;
02749
02750 for(;;) {
02751 double x;
02752 SDL_WaitEvent(&event);
02753 switch(event.type) {
02754 case SDL_KEYDOWN:
02755 if (exit_on_keydown) {
02756 do_exit();
02757 break;
02758 }
02759 switch(event.key.keysym.sym) {
02760 case SDLK_ESCAPE:
02761 case SDLK_q:
02762 do_exit();
02763 break;
02764 case SDLK_f:
02765 toggle_full_screen();
02766 break;
02767 case SDLK_p:
02768 case SDLK_SPACE:
02769 toggle_pause();
02770 break;
02771 case SDLK_s:
02772 step_to_next_frame();
02773 break;
02774 case SDLK_a:
02775 if (cur_stream)
02776 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
02777 break;
02778 case SDLK_v:
02779 if (cur_stream)
02780 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
02781 break;
02782 case SDLK_t:
02783 if (cur_stream)
02784 stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
02785 break;
02786 case SDLK_w:
02787 toggle_audio_display();
02788 break;
02789 case SDLK_LEFT:
02790 incr = -10.0;
02791 goto do_seek;
02792 case SDLK_RIGHT:
02793 incr = 10.0;
02794 goto do_seek;
02795 case SDLK_UP:
02796 incr = 60.0;
02797 goto do_seek;
02798 case SDLK_DOWN:
02799 incr = -60.0;
02800 do_seek:
02801 if (cur_stream) {
02802 if (seek_by_bytes) {
02803 if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
02804 pos= cur_stream->video_current_pos;
02805 }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
02806 pos= cur_stream->audio_pkt.pos;
02807 }else
02808 pos = avio_tell(cur_stream->ic->pb);
02809 if (cur_stream->ic->bit_rate)
02810 incr *= cur_stream->ic->bit_rate / 8.0;
02811 else
02812 incr *= 180000.0;
02813 pos += incr;
02814 stream_seek(cur_stream, pos, incr, 1);
02815 } else {
02816 pos = get_master_clock(cur_stream);
02817 pos += incr;
02818 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
02819 }
02820 }
02821 break;
02822 default:
02823 break;
02824 }
02825 break;
02826 case SDL_MOUSEBUTTONDOWN:
02827 if (exit_on_mousedown) {
02828 do_exit();
02829 break;
02830 }
02831 case SDL_MOUSEMOTION:
02832 if(event.type ==SDL_MOUSEBUTTONDOWN){
02833 x= event.button.x;
02834 }else{
02835 if(event.motion.state != SDL_PRESSED)
02836 break;
02837 x= event.motion.x;
02838 }
02839 if (cur_stream) {
02840 if(seek_by_bytes || cur_stream->ic->duration<=0){
02841 uint64_t size= avio_size(cur_stream->ic->pb);
02842 stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
02843 }else{
02844 int64_t ts;
02845 int ns, hh, mm, ss;
02846 int tns, thh, tmm, tss;
02847 tns = cur_stream->ic->duration/1000000LL;
02848 thh = tns/3600;
02849 tmm = (tns%3600)/60;
02850 tss = (tns%60);
02851 frac = x/cur_stream->width;
02852 ns = frac*tns;
02853 hh = ns/3600;
02854 mm = (ns%3600)/60;
02855 ss = (ns%60);
02856 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
02857 hh, mm, ss, thh, tmm, tss);
02858 ts = frac*cur_stream->ic->duration;
02859 if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
02860 ts += cur_stream->ic->start_time;
02861 stream_seek(cur_stream, ts, 0, 0);
02862 }
02863 }
02864 break;
02865 case SDL_VIDEORESIZE:
02866 if (cur_stream) {
02867 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
02868 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
02869 screen_width = cur_stream->width = event.resize.w;
02870 screen_height= cur_stream->height= event.resize.h;
02871 }
02872 break;
02873 case SDL_QUIT:
02874 case FF_QUIT_EVENT:
02875 do_exit();
02876 break;
02877 case FF_ALLOC_EVENT:
02878 video_open(event.user.data1);
02879 alloc_picture(event.user.data1);
02880 break;
02881 case FF_REFRESH_EVENT:
02882 video_refresh_timer(event.user.data1);
02883 cur_stream->refresh=0;
02884 break;
02885 default:
02886 break;
02887 }
02888 }
02889 }
02890
02891 static void opt_frame_size(const char *arg)
02892 {
02893 if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
02894 fprintf(stderr, "Incorrect frame size\n");
02895 exit(1);
02896 }
02897 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
02898 fprintf(stderr, "Frame size must be a multiple of 2\n");
02899 exit(1);
02900 }
02901 }
02902
02903 static int opt_width(const char *opt, const char *arg)
02904 {
02905 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02906 return 0;
02907 }
02908
02909 static int opt_height(const char *opt, const char *arg)
02910 {
02911 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
02912 return 0;
02913 }
02914
02915 static void opt_format(const char *arg)
02916 {
02917 file_iformat = av_find_input_format(arg);
02918 if (!file_iformat) {
02919 fprintf(stderr, "Unknown input format: %s\n", arg);
02920 exit(1);
02921 }
02922 }
02923
02924 static void opt_frame_pix_fmt(const char *arg)
02925 {
02926 frame_pix_fmt = av_get_pix_fmt(arg);
02927 }
02928
02929 static int opt_sync(const char *opt, const char *arg)
02930 {
02931 if (!strcmp(arg, "audio"))
02932 av_sync_type = AV_SYNC_AUDIO_MASTER;
02933 else if (!strcmp(arg, "video"))
02934 av_sync_type = AV_SYNC_VIDEO_MASTER;
02935 else if (!strcmp(arg, "ext"))
02936 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
02937 else {
02938 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
02939 exit(1);
02940 }
02941 return 0;
02942 }
02943
02944 static int opt_seek(const char *opt, const char *arg)
02945 {
02946 start_time = parse_time_or_die(opt, arg, 1);
02947 return 0;
02948 }
02949
02950 static int opt_duration(const char *opt, const char *arg)
02951 {
02952 duration = parse_time_or_die(opt, arg, 1);
02953 return 0;
02954 }
02955
02956 static int opt_debug(const char *opt, const char *arg)
02957 {
02958 av_log_set_level(99);
02959 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02960 return 0;
02961 }
02962
02963 static int opt_vismv(const char *opt, const char *arg)
02964 {
02965 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
02966 return 0;
02967 }
02968
02969 static int opt_thread_count(const char *opt, const char *arg)
02970 {
02971 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
02972 #if !HAVE_THREADS
02973 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
02974 #endif
02975 return 0;
02976 }
02977
02978 static const OptionDef options[] = {
02979 #include "cmdutils_common_opts.h"
02980 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
02981 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
02982 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
02983 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
02984 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
02985 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
02986 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
02987 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
02988 { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
02989 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
02990 { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
02991 { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
02992 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
02993 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
02994 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
02995 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
02996 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
02997 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
02998 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
02999 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
03000 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
03001 { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
03002 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
03003 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
03004 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
03005 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
03006 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
03007 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)", "threshold" },
03008 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
03009 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
03010 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
03011 { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
03012 { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
03013 { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
03014 { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
03015 { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
03016 { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
03017 #if CONFIG_AVFILTER
03018 { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
03019 #endif
03020 { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
03021 { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
03022 { NULL, },
03023 };
03024
03025 static void show_usage(void)
03026 {
03027 printf("Simple media player\n");
03028 printf("usage: ffplay [options] input_file\n");
03029 printf("\n");
03030 }
03031
03032 static void show_help(void)
03033 {
03034 av_log_set_callback(log_callback_help);
03035 show_usage();
03036 show_help_options(options, "Main options:\n",
03037 OPT_EXPERT, 0);
03038 show_help_options(options, "\nAdvanced options:\n",
03039 OPT_EXPERT, OPT_EXPERT);
03040 printf("\n");
03041 av_opt_show2(avcodec_opts[0], NULL,
03042 AV_OPT_FLAG_DECODING_PARAM, 0);
03043 printf("\n");
03044 av_opt_show2(avformat_opts, NULL,
03045 AV_OPT_FLAG_DECODING_PARAM, 0);
03046 #if !CONFIG_AVFILTER
03047 printf("\n");
03048 av_opt_show2(sws_opts, NULL,
03049 AV_OPT_FLAG_ENCODING_PARAM, 0);
03050 #endif
03051 printf("\nWhile playing:\n"
03052 "q, ESC quit\n"
03053 "f toggle full screen\n"
03054 "p, SPC pause\n"
03055 "a cycle audio channel\n"
03056 "v cycle video channel\n"
03057 "t cycle subtitle channel\n"
03058 "w show audio waves\n"
03059 "s activate frame-step mode\n"
03060 "left/right seek backward/forward 10 seconds\n"
03061 "down/up seek backward/forward 1 minute\n"
03062 "mouse click seek to percentage in file corresponding to fraction of width\n"
03063 );
03064 }
03065
03066 static void opt_input_file(const char *filename)
03067 {
03068 if (input_filename) {
03069 fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
03070 filename, input_filename);
03071 exit(1);
03072 }
03073 if (!strcmp(filename, "-"))
03074 filename = "pipe:";
03075 input_filename = filename;
03076 }
03077
03078
03079 int main(int argc, char **argv)
03080 {
03081 int flags;
03082
03083 av_log_set_flags(AV_LOG_SKIP_REPEATED);
03084
03085
03086 avcodec_register_all();
03087 #if CONFIG_AVDEVICE
03088 avdevice_register_all();
03089 #endif
03090 #if CONFIG_AVFILTER
03091 avfilter_register_all();
03092 #endif
03093 av_register_all();
03094
03095 init_opts();
03096
03097 show_banner();
03098
03099 parse_options(argc, argv, options, opt_input_file);
03100
03101 if (!input_filename) {
03102 show_usage();
03103 fprintf(stderr, "An input file must be specified\n");
03104 fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
03105 exit(1);
03106 }
03107
03108 if (display_disable) {
03109 video_disable = 1;
03110 }
03111 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
03112 #if !defined(__MINGW32__) && !defined(__APPLE__)
03113 flags |= SDL_INIT_EVENTTHREAD;
03114 #endif
03115 if (SDL_Init (flags)) {
03116 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
03117 exit(1);
03118 }
03119
03120 if (!display_disable) {
03121 #if HAVE_SDL_VIDEO_SIZE
03122 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
03123 fs_screen_width = vi->current_w;
03124 fs_screen_height = vi->current_h;
03125 #endif
03126 }
03127
03128 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
03129 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
03130 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
03131
03132 av_init_packet(&flush_pkt);
03133 flush_pkt.data= "FLUSH";
03134
03135 cur_stream = stream_open(input_filename, file_iformat);
03136
03137 event_loop();
03138
03139
03140
03141 return 0;
03142 }