00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00030 #include "libavutil/intmath.h"
00031 #include "libavutil/imgutils.h"
00032 #include "avcodec.h"
00033 #include "dsputil.h"
00034 #include "internal.h"
00035 #include "mpegvideo.h"
00036 #include "mpegvideo_common.h"
00037 #include "mjpegenc.h"
00038 #include "msmpeg4.h"
00039 #include "faandct.h"
00040 #include "xvmc_internal.h"
00041 #include <limits.h>
00042
00043
00044
00045
/* Forward declarations for the per-standard C inverse-quantization
 * routines; ff_dct_common_init() installs these into the MpegEncContext
 * (possibly replaced later by architecture-optimized versions). */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
00060
00061
00062
00063
00064
00065
00066
00067
/* Default chroma qscale table: identity mapping, i.e. chroma uses the
 * same quantizer scale as luma unless a codec overrides the table. */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  luma qscale 0..31 maps straight through
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
00072
/* MPEG-1 DC coefficient scale: constant 8 for every qscale
 * (indexed by qscale, 128 entries). */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  qscale 0 ... 127
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
00080
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4. */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  qscale 0 ... 127
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
00088
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2. */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  qscale 0 ... 127
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
00096
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (full precision). */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  qscale 0 ... 127
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
00104
/* DC scale tables selected by the MPEG-2 intra_dc_precision field (0..3);
 * precision 0 reuses the MPEG-1 table (constant 8). */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
00111
/* PIX_FMT_NONE-terminated list of supported output formats for plain
 * (software) 4:2:0 decoders. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00116
/* Like ff_pixfmt_list_420 but offering hardware-accelerated formats
 * first; PIX_FMT_NONE-terminated. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
00123
/**
 * Scan [p, end) for an MPEG-style start code (the byte sequence
 * 0x00 0x00 0x01 xx) and return a pointer just past it, with *state
 * holding the last 4 bytes read (the start code itself on a match).
 * *state carries history across calls so codes split over buffer
 * boundaries are still found.
 * @return pointer past the start code, or end if none was found
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* First feed up to 3 bytes through *state so a start code that
     * straddles the previous buffer is detected. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)
            return p;
    }

    /* Main scan: step 1-3 bytes at a time depending on which of the
     * last bytes rule out a 00 00 01 pattern ending here. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3; /* last byte >1: no code can end within next 2 bytes */
        else if(p[-2]          ) p+= 2; /* byte before last nonzero: skip 2 */
        else if(p[-3]|(p[-1]-1)) p++;   /* not exactly 00 00 01: advance 1 */
        else{
            /* found 00 00 01; include the byte after it */
            p++;
            break;
        }
    }

    /* Reload *state with the last 4 bytes before the return position
     * (clamped so we never read past end). */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
00153
00154
/**
 * Initialize the DCT-related parts of an MpegEncContext: install the C
 * inverse-quantization routines (optionally replaced by per-architecture
 * optimized versions) and build the permuted scantables.
 * @return 0 (always succeeds)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    /* bitexact mode must match the reference decoder exactly */
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* Let platform-specific code override the C routines above. */
#if HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* The scantables must be permuted to match the coefficient order
     * expected by the selected IDCT implementation. */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
00197
00198 void ff_copy_picture(Picture *dst, Picture *src){
00199 *dst = *src;
00200 dst->type= FF_BUFFER_TYPE_COPY;
00201 }
00202
/**
 * Release a frame buffer previously obtained through the
 * AVCodecContext release_buffer() callback, along with any hwaccel
 * private data attached to the picture.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}
00211
/**
 * Allocate a frame buffer for a Picture through the AVCodecContext
 * get_buffer() callback and validate the result; also allocates
 * hwaccel private data when a hwaccel is active.
 * @return 0 on success, -1 on failure (nothing left allocated)
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

    /* get_buffer() must return data planes, a buffer type and an age. */
    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    /* The strides must stay constant over the whole sequence once set. */
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* Both chroma planes must share one stride. */
    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
00252
00257 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
00258 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1;
00259 const int mb_array_size= s->mb_stride*s->mb_height;
00260 const int b8_array_size= s->b8_stride*s->mb_height*2;
00261 const int b4_array_size= s->b4_stride*s->mb_height*4;
00262 int i;
00263 int r= -1;
00264
00265 if(shared){
00266 assert(pic->data[0]);
00267 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
00268 pic->type= FF_BUFFER_TYPE_SHARED;
00269 }else{
00270 assert(!pic->data[0]);
00271
00272 if (alloc_frame_buffer(s, pic) < 0)
00273 return -1;
00274
00275 s->linesize = pic->linesize[0];
00276 s->uvlinesize= pic->linesize[1];
00277 }
00278
00279 if(pic->qscale_table==NULL){
00280 if (s->encoding) {
00281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
00282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
00283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
00284 }
00285
00286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail)
00287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
00288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
00289 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
00290 if(s->out_format == FMT_H264){
00291 for(i=0; i<2; i++){
00292 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
00293 pic->motion_val[i]= pic->motion_val_base[i]+4;
00294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00295 }
00296 pic->motion_subsample_log2= 2;
00297 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
00298 for(i=0; i<2; i++){
00299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
00300 pic->motion_val[i]= pic->motion_val_base[i]+4;
00301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
00302 }
00303 pic->motion_subsample_log2= 3;
00304 }
00305 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
00306 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
00307 }
00308 pic->qstride= s->mb_stride;
00309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
00310 }
00311
00312
00313
00314 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
00315 s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
00316 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
00317 pic->age= INT_MAX;
00318
00319 return 0;
00320 fail:
00321 if(r>=0)
00322 free_frame_buffer(s, pic);
00323 return -1;
00324 }
00325
00329 static void free_picture(MpegEncContext *s, Picture *pic){
00330 int i;
00331
00332 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
00333 free_frame_buffer(s, pic);
00334 }
00335
00336 av_freep(&pic->mb_var);
00337 av_freep(&pic->mc_mb_var);
00338 av_freep(&pic->mb_mean);
00339 av_freep(&pic->mbskip_table);
00340 av_freep(&pic->qscale_table);
00341 av_freep(&pic->mb_type_base);
00342 av_freep(&pic->dct_coeff);
00343 av_freep(&pic->pan_scan);
00344 pic->mb_type= NULL;
00345 for(i=0; i<2; i++){
00346 av_freep(&pic->motion_val_base[i]);
00347 av_freep(&pic->ref_index[i]);
00348 }
00349
00350 if(pic->type == FF_BUFFER_TYPE_SHARED){
00351 for(i=0; i<4; i++){
00352 pic->base[i]=
00353 pic->data[i]= NULL;
00354 }
00355 pic->type= 0;
00356 }
00357 }
00358
/**
 * Allocate the per-thread scratch buffers of an MpegEncContext
 * (edge emulation, ME scratchpads, block storage, AC prediction).
 * @return 0 on success, -1 on allocation failure
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    /* edge emulation buffer, centered so negative offsets are valid */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

    /* one scratchpad shared by ME, RD, bidir and OBMC code paths */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* AC coefficient prediction: one 16-entry row per block, with
         * the luma plane followed by the two chroma planes. */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; // free() through MPV_common_end()
}
00401
00402 static void free_duplicate_context(MpegEncContext *s){
00403 if(s==NULL) return;
00404
00405 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
00406 av_freep(&s->me.scratchpad);
00407 s->me.temp=
00408 s->rd_scratchpad=
00409 s->b_scratchpad=
00410 s->obmc_scratchpad= NULL;
00411
00412 av_freep(&s->dct_error_sum);
00413 av_freep(&s->me.map);
00414 av_freep(&s->me.score_map);
00415 av_freep(&s->blocks);
00416 av_freep(&s->ac_val_base);
00417 s->block= NULL;
00418 }
00419
/**
 * Copy the thread-local fields of one MpegEncContext into another.
 * Used by ff_update_duplicate_context() to preserve a slice thread's
 * own buffers and state while the rest is overwritten from the master.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
00446
/**
 * Refresh a slice-thread context from the master context while keeping
 * the thread's own scratch buffers and per-slice state intact.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext bak;
    int i;

    /* Save dst's thread-local pointers, bulk-copy the master context
     * over dst, then restore the saved pointers. */
    //FIXME copy only needed parts
    backup_duplicate_context(&bak, dst);
    memcpy(dst, src, sizeof(MpegEncContext));
    backup_duplicate_context(dst, &bak);
    /* pblocks must point into dst's own block array, not the master's. */
    for(i=0;i<12;i++){
        dst->pblocks[i] = &dst->block[i];
    }
}
00460
00465 void MPV_common_defaults(MpegEncContext *s){
00466 s->y_dc_scale_table=
00467 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
00468 s->chroma_qscale_table= ff_default_chroma_qscale_table;
00469 s->progressive_frame= 1;
00470 s->progressive_sequence= 1;
00471 s->picture_structure= PICT_FRAME;
00472
00473 s->coded_picture_number = 0;
00474 s->picture_number = 0;
00475 s->input_picture_number = 0;
00476
00477 s->picture_in_gop_number = 0;
00478
00479 s->f_code = 1;
00480 s->b_code = 1;
00481 }
00482
/**
 * Set decoding-specific defaults; currently identical to the common
 * defaults, kept separate so decoder-only defaults can be added.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
00490
/**
 * Initialize an MpegEncContext: derive the macroblock geometry from
 * width/height, allocate all shared tables and set up the per-thread
 * (slice) contexts. MPV_common_end() undoes everything on failure.
 * @return 0 on success, -1 on error
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    /* Interlaced MPEG-2 needs mb_height rounded to a multiple of 2 MB rows;
     * H.264 manages its own mb_height. */
    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* slice threading cannot use more threads than MB rows */
    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* strides are one unit wider than the picture so edge positions exist */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                  &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourccs to upper case */
    s->codec_tag = ff_toupper4(s->avctx->codec_tag);

    s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    /* MB index -> MB position mapping (the extra entry is a sentinel) */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail)
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* motion-vector tables; the +mb_stride+1 offset allows access to
         * the row above and the column to the left for prediction */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        /* precomputed quantizer matrices, 32 qscales x 64 coefficients */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
    for(i = 0; i < MAX_PICTURE_COUNT; i++) {
        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
        for(i=0; i<2; i++){
            int j, k;
            for(j=0; j<2; j++){
                for(k=0; k<2; k++){
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
            }
            FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
        }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values: the Y plane is followed by the two chroma planes;
         * MPEG-like DC prediction needs 2^(precision+1) as a reset value */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* thread 0 reuses the master context; the rest get copies that
     * then receive their own scratch buffers */
    s->thread_context[0]= s;
    threads = s->avctx->thread_count;

    for(i=1; i<threads; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<threads; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        /* split the MB rows as evenly as possible across threads */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
00684
00685
/* init common structure for both encoder and decoder */
/**
 * Free everything allocated by MPV_common_init(); safe to call on a
 * partially initialized context (all frees go through av_freep / NULL
 * checks).
 */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* free the per-thread scratch buffers; thread 0 is the master
     * context itself, so only contexts 1..n-1 are freed as objects */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    /* the non-base pointers were offsets into the bases freed above */
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
00767
/**
 * Build the max_level / max_run / index_run lookup tables of an RLTable
 * from its run/level tables, separately for the "not last" and "last"
 * coefficient classes.
 * @param static_store if non-NULL, storage for the derived tables
 *                     (avoids av_malloc and allows early-out when the
 *                     table was already initialized)
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1); /* rl->n acts as "unset" sentinel */
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n) /* first code for this run wins */
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* the three derived tables share one static_store row, laid out
         * back to back: max_level | max_run | index_run */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
00818
/**
 * Expand an RLTable's VLC into per-qscale rl_vlc tables with the
 * dequantized level baked in, so the decoder can look up len/level/run
 * in a single step.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        /* H.263-style dequant: level*2*q + (q-1|1); q==0 is the
         * pass-through (no dequant) table */
        int qmul= q*2;
        int qadd= (q-1)|1;

        if(q==0){
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ /* illegal code */
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ /* more bits needed */
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ /* esc */
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    if(code >= rl->last) run+=192; /* flag "last" codes via run offset */
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
00858
00859 int ff_find_unused_picture(MpegEncContext *s, int shared){
00860 int i;
00861
00862 if(shared){
00863 for(i=0; i<MAX_PICTURE_COUNT; i++){
00864 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
00865 }
00866 }else{
00867 for(i=0; i<MAX_PICTURE_COUNT; i++){
00868 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i;
00869 }
00870 for(i=0; i<MAX_PICTURE_COUNT; i++){
00871 if(s->picture[i].data[0]==NULL) return i;
00872 }
00873 }
00874
00875 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
00876
00877
00878
00879
00880
00881
00882
00883
00884
00885
00886
00887 abort();
00888 return -1;
00889 }
00890
00891 static void update_noise_reduction(MpegEncContext *s){
00892 int intra, i;
00893
00894 for(intra=0; intra<2; intra++){
00895 if(s->dct_count[intra] > (1<<16)){
00896 for(i=0; i<64; i++){
00897 s->dct_error_sum[intra][i] >>=1;
00898 }
00899 s->dct_count[intra] >>= 1;
00900 }
00901
00902 for(i=0; i<64; i++){
00903 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
00904 }
00905 }
00906 }
00907
/**
 * Prepare the context for encoding/decoding the next frame: recycle
 * unneeded reference buffers, allocate the current picture, rotate the
 * last/next reference pointers and select the dequant functions.
 * Called before a frame is encoded/decoded.
 * @return 0 on success, -1 on allocation failure
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
        if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
            free_frame_buffer(s, s->last_picture_ptr);

            /* release forgotten pictures */
            /* if(mpeg124/h263) */
            if(!s->encoding){
                for(i=0; i<MAX_PICTURE_COUNT; i++){
                    if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                        av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                        free_frame_buffer(s, &s->picture[i]);
                    }
                }
            }
        }
    }

    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                free_frame_buffer(s, &s->picture[i]);
            }
        }

        /* reuse the current picture slot if its buffer was released,
         * otherwise grab a fresh slot */
        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != FF_B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;

        s->current_picture_ptr->top_field_first= s->top_field_first; //FIXME use only the vars from current_pic
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;

    s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* rotate the reference pointers: last <- next <- current (B-frames
     * never become references, dropable frames never become "next") */
    if (s->pict_type != FF_B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }

    if(s->codec_id != CODEC_ID_H264){
        /* the stream starts on a non-keyframe: fabricate gray reference
         * frames so motion compensation has something to read from */
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
            av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
        }
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* field pictures: double the strides (and offset the bottom field
     * by one line) so the local Picture copies address a single field */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

    s->hurry_up= s->avctx->hurry_up;
    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
01055
01056
/**
 * Finish decoding/encoding the current frame: pad picture edges for
 * unrestricted motion vectors, update per-frame bookkeeping and, when
 * encoding, release buffers of non-reference frames.
 */
void MPV_frame_end(MpegEncContext *s)
{
    int i;

    /* Draw the edges of the reconstructed picture so that motion vectors
       pointing outside the frame read replicated border pixels. Skipped for
       hwaccel/XvMC/VDPAU output (no CPU-visible pixels) and when the caller
       asked for edge emulation instead (CODEC_FLAG_EMU_EDGE). */
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if(!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
        s->dsp.draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
        /* chroma planes are half size in both directions here (4:2:0 edge padding) */
        s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
        s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
    }
    emms_c(); /* leave MMX state clean for float code that may follow */

    /* remember per-frame stats used by rate control / next frame setup */
    s->last_pict_type                 = s->pict_type;
    s->last_lambda_for[s->pict_type]  = s->current_picture_ptr->quality;
    if(s->pict_type!=FF_B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
    /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release the non-reference frames; the encoder no longer needs them */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference ){
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }

#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    /* expose the just-finished frame to the API user */
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
01108
01116 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01117 int x, y, fr, f;
01118
01119 sx= av_clip(sx, 0, w-1);
01120 sy= av_clip(sy, 0, h-1);
01121 ex= av_clip(ex, 0, w-1);
01122 ey= av_clip(ey, 0, h-1);
01123
01124 buf[sy*stride + sx]+= color;
01125
01126 if(FFABS(ex - sx) > FFABS(ey - sy)){
01127 if(sx > ex){
01128 FFSWAP(int, sx, ex);
01129 FFSWAP(int, sy, ey);
01130 }
01131 buf+= sx + sy*stride;
01132 ex-= sx;
01133 f= ((ey-sy)<<16)/ex;
01134 for(x= 0; x <= ex; x++){
01135 y = (x*f)>>16;
01136 fr= (x*f)&0xFFFF;
01137 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
01138 buf[(y+1)*stride + x]+= (color* fr )>>16;
01139 }
01140 }else{
01141 if(sy > ey){
01142 FFSWAP(int, sx, ex);
01143 FFSWAP(int, sy, ey);
01144 }
01145 buf+= sx + sy*stride;
01146 ey-= sy;
01147 if(ey) f= ((ex-sx)<<16)/ey;
01148 else f= 0;
01149 for(y= 0; y <= ey; y++){
01150 x = (y*f)>>16;
01151 fr= (y*f)&0xFFFF;
01152 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
01153 buf[y*stride + x+1]+= (color* fr )>>16;
01154 }
01155 }
01156 }
01157
01165 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
01166 int dx,dy;
01167
01168 sx= av_clip(sx, -100, w+100);
01169 sy= av_clip(sy, -100, h+100);
01170 ex= av_clip(ex, -100, w+100);
01171 ey= av_clip(ey, -100, h+100);
01172
01173 dx= ex - sx;
01174 dy= ey - sy;
01175
01176 if(dx*dx + dy*dy > 3*3){
01177 int rx= dx + dy;
01178 int ry= -dx + dy;
01179 int length= ff_sqrt((rx*rx + ry*ry)<<8);
01180
01181
01182 rx= ROUNDED_DIV(rx*3<<4, length);
01183 ry= ROUNDED_DIV(ry*3<<4, length);
01184
01185 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
01186 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
01187 }
01188 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
01189 }
01190
/**
 * Print and/or visualize per-macroblock debug information (skip counts, QP,
 * macroblock types, motion vectors) depending on avctx->debug / debug_mv.
 * The visualization path copies the frame into s->visualization_buffer and
 * draws on that copy, so the decoder's own reference frames stay untouched.
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;

    /* ----- textual dump to the log ----- */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
        case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
        case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
        case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
        case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
        case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
        case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    /* consecutive-skip count, clipped to one digit */
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    /* first char: prediction type of the macroblock */
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    /* second char: partition shape */
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");

                    /* third char: interlaced flag */
                    if(IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }

            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* ----- graphical overlay drawn onto a copy of the frame ----- */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample; /* MV units -> pixels */
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; /* needed to see the vectors without trees */

        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        /* redirect the output planes to a private copy we are allowed to scribble on */
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift; /* chroma rows per macroblock */

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                /* motion vector arrows: type 0 = P forward, 1 = B forward, 2 = B backward */
                if((s->avctx->debug_mv) && pict->motion_val){
                    int type;
                    for(type=0; type<3; type++){
                        int direction = 0;
                        switch (type) {
                        case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 0;
                                break;
                        case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                    continue;
                                direction = 1;
                                break;
                        }
                        if(!USES_LIST(pict->mb_type[mb_index], direction))
                            continue;

                        if(IS_8X8(pict->mb_type[mb_index])){
                            /* one arrow per 8x8 sub-block */
                            int i;
                            for(i=0; i<4; i++){
                                int sx= mb_x*16 + 4 + 8*(i&1);
                                int sy= mb_y*16 + 4 + 8*(i>>1);
                                int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                                int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                                int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                                draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                            }
                        }else if(IS_16X8(pict->mb_type[mb_index])){
                            /* one arrow per 16x8 half */
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 8;
                                int sy=mb_y*16 + 4 + 8*i;
                                int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2; /* field MVs are in field units */

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else if(IS_8X16(pict->mb_type[mb_index])){
                            /* one arrow per 8x16 half */
                            int i;
                            for(i=0; i<2; i++){
                                int sx=mb_x*16 + 4 + 8*i;
                                int sy=mb_y*16 + 8;
                                int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                                int mx=(pict->motion_val[direction][xy][0]>>shift);
                                int my=(pict->motion_val[direction][xy][1]>>shift);

                                if(IS_INTERLACED(pict->mb_type[mb_index]))
                                    my*=2; /* field MVs are in field units */

                                draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                            }
                        }else{
                            /* single 16x16 vector, anchored at the MB center */
                            int sx= mb_x*16 + 8;
                            int sy= mb_y*16 + 8;
                            int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                            int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                            int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                            draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                        }
                    }
                }
                /* QP visualization: paint the chroma of the MB with a gray level proportional to QP */
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                /* MB-type visualization: color the chroma per type, mark partitions in luma */
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
/* pick a chroma (u,v) pair on a color circle of radius r at angle theta (degrees) */
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));


                    u=v=128; /* neutral gray by default */
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
                        /* direct-skip: left uncolored */
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
                        /* skip: left uncolored */
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL; /* replicate into 8 bytes */
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    /* segmentation markers in luma: horizontal line for 16x8/8x8 split */
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    /* vertical line for 8x16/8x8 split */
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    /* sub-8x8 splits: mark 4x4 boundaries where neighbouring MVs differ */
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            /* compare packed (mx,my) pairs as single 32-bit values */
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        /* interlaced H.264 MBs: not visualized */
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
01452
/**
 * Half-pel motion compensation of one luma block at lowres resolution.
 * Position fractions are kept in the low lowres+1 bits of the motion vector;
 * when the source area crosses the frame edge, pixels are fetched through
 * the edge-emulation buffer instead.
 * @return 1 if edge emulation was needed, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);       /* index into the chroma MC table */
    const int s_mask= (2<<lowres)-1;            /* mask of the sub-pel fraction bits */
    int emu=0;
    int sx, sy;

    if(s->quarter_sample){
        /* convert qpel vectors to hpel precision */
        motion_x/=2;
        motion_y/=2;
    }

    /* split the vector into integer and fractional parts */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x += motion_x >> (lowres+1);
    src_y += motion_y >> (lowres+1);

    src += src_y * stride + src_x;

    /* unsigned compare also catches negative src_x/src_y */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - w
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                            src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
        src= s->edge_emu_buffer;
        emu=1;
    }

    /* rescale the fraction to the 2-bit range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    if(field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, sx, sy);
    return emu;
}
01495
01496
/**
 * Motion compensation of one macroblock (luma + chroma) at lowres
 * resolution, handling frame and field based prediction.
 * Chroma vector derivation depends on the output format (H.263, H.261,
 * or MPEG-1/2 style).
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);       /* index into the chroma MC table */
    const int block_s= 8>>lowres;               /* lowres block size in pixels */
    const int s_mask= (2<<lowres)-1;            /* mask of the sub-pel fraction bits */
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){
        /* convert qpel vectors to hpel precision */
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        /* correct the vertical position for the field being written */
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    if (s->out_format == FMT_H263) {
        /* H.263: chroma fraction keeps the luma's half-pel bit */
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){
        /* H.261: chroma vectors are luma vectors divided by 4 */
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s + (mx >> lowres);
        uvsrc_y =    mb_y*block_s + (my >> lowres);
    } else {
        /* MPEG-1/2 style: chroma vectors are luma vectors divided by 2 */
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* source area outside the padded frame -> go through edge emulation */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){
        /* start writing on the second line; linesize is already doubled */
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        /* read from the bottom field of the reference */
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale fractions to the 2-bit range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    /* NOTE(review): no H.261 loop filter in the lowres path */
}
01593
/**
 * Chroma motion compensation for 4MV (four luma vectors) macroblocks at
 * lowres resolution: (mx,my) is the sum of the four luma vectors and is
 * rounded to a single chroma vector.
 */
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     h264_chroma_mc_func *pix_op,
                                     int mx, int my){
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);       /* index into the chroma MC table */
    const int block_s= 8>>lowres;               /* lowres block size in pixels */
    const int s_mask= (2<<lowres)-1;            /* mask of the sub-pel fraction bits */
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
    int emu=0, src_x, src_y, offset, sx, sy;
    uint8_t *ptr;

    if(s->quarter_sample){
        /* convert qpel vectors to hpel precision */
        mx/=2;
        my/=2;
    }

    /* H.263-style rounding of the summed luma vectors to one chroma vector */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    sx= mx & s_mask;
    sy= my & s_mask;
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
    src_y = s->mb_y*block_s + (my >> (lowres+1));

    offset = src_y * s->uvlinesize + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        /* unsigned compare also catches negative src_x/src_y */
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    /* rescale the fraction to the 2-bit range expected by pix_op */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);

    ptr = ref_picture[2] + offset;
    if(emu){
        /* Cr needs edge emulation whenever Cb did (same geometry) */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
        ptr= s->edge_emu_buffer;
    }
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
}
01644
/**
 * Motion compensation of one macroblock at lowres resolution: dispatches on
 * s->mv_type (16x16, 8x8, field, 16x8, dual-prime) to the lowres MC helpers.
 * @param dir 0 = forward prediction (s->mv[0]), 1 = backward (s->mv[1])
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres; /* lowres block size in pixels */

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four luma vectors; chroma gets their rounded sum afterwards */
        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                        ref_picture[0], 0, 0,
                        (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                        block_s, block_s, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1]);

            mx += s->mv[dir][i][0];
            my += s->mv[dir][i][1];
        }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* frame picture: predict top and bottom field separately */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            /* same-frame field prediction uses the current picture, not the reference */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            /* pick the picture that actually holds the selected field */
            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* MPEG-2 dual prime: average two field predictions */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                /* after put we make avg of the same block */
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                /* opposite parity is always in the same frame if this is second field */
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
01770
01771
/* Dequantize an intra block in place and inverse-transform it into dest
   (overwriting, not adding — intra blocks carry the full pixel values). */
static inline void put_dct(MpegEncContext *s,
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
{
    s->dct_unquantize_intra(s, block, i, qscale);
    s->dsp.idct_put (dest, line_size, block);
}
01778
01779
01780 static inline void add_dct(MpegEncContext *s,
01781 DCTELEM *block, int i, uint8_t *dest, int line_size)
01782 {
01783 if (s->block_last_index[i] >= 0) {
01784 s->dsp.idct_add (dest, line_size, block);
01785 }
01786 }
01787
01788 static inline void add_dequant_dct(MpegEncContext *s,
01789 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
01790 {
01791 if (s->block_last_index[i] >= 0) {
01792 s->dct_unquantize_inter(s, block, i, qscale);
01793
01794 s->dsp.idct_add (dest, line_size, block);
01795 }
01796 }
01797
01801 void ff_clean_intra_table_entries(MpegEncContext *s)
01802 {
01803 int wrap = s->b8_stride;
01804 int xy = s->block_index[0];
01805
01806 s->dc_val[0][xy ] =
01807 s->dc_val[0][xy + 1 ] =
01808 s->dc_val[0][xy + wrap] =
01809 s->dc_val[0][xy + 1 + wrap] = 1024;
01810
01811 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
01812 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
01813 if (s->msmpeg4_version>=3) {
01814 s->coded_block[xy ] =
01815 s->coded_block[xy + 1 ] =
01816 s->coded_block[xy + wrap] =
01817 s->coded_block[xy + 1 + wrap] = 0;
01818 }
01819
01820 wrap = s->mb_stride;
01821 xy = s->mb_x + s->mb_y * wrap;
01822 s->dc_val[1][xy] =
01823 s->dc_val[2][xy] = 1024;
01824
01825 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
01826 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
01827
01828 s->mbintra_table[xy]= 0;
01829 }
01830
01831
01832
01833
01834
01835
01836
01837
01838
01839
01840
01841 static av_always_inline
01842 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
01843 int lowres_flag, int is_mpeg12)
01844 {
01845 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
01846 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
01847 ff_xvmc_decode_mb(s);
01848 return;
01849 }
01850
01851 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
01852
01853 int i,j;
01854 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
01855 for(i=0; i<6; i++)
01856 for(j=0; j<64; j++)
01857 *dct++ = block[i][s->dsp.idct_permutation[j]];
01858 }
01859
01860 s->current_picture.qscale_table[mb_xy]= s->qscale;
01861
01862
01863 if (!s->mb_intra) {
01864 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
01865 if(s->mbintra_table[mb_xy])
01866 ff_clean_intra_table_entries(s);
01867 } else {
01868 s->last_dc[0] =
01869 s->last_dc[1] =
01870 s->last_dc[2] = 128 << s->intra_dc_precision;
01871 }
01872 }
01873 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
01874 s->mbintra_table[mb_xy]=1;
01875
01876 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) {
01877 uint8_t *dest_y, *dest_cb, *dest_cr;
01878 int dct_linesize, dct_offset;
01879 op_pixels_func (*op_pix)[4];
01880 qpel_mc_func (*op_qpix)[16];
01881 const int linesize= s->current_picture.linesize[0];
01882 const int uvlinesize= s->current_picture.linesize[1];
01883 const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
01884 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
01885
01886
01887
01888 if(!s->encoding){
01889 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
01890 const int age= s->current_picture.age;
01891
01892 assert(age);
01893
01894 if (s->mb_skipped) {
01895 s->mb_skipped= 0;
01896 assert(s->pict_type!=FF_I_TYPE);
01897
01898 (*mbskip_ptr) ++;
01899 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01900
01901
01902 if (*mbskip_ptr >= age && s->current_picture.reference){
01903 return;
01904 }
01905 } else if(!s->current_picture.reference){
01906 (*mbskip_ptr) ++;
01907 if(*mbskip_ptr >99) *mbskip_ptr= 99;
01908 } else{
01909 *mbskip_ptr = 0;
01910 }
01911 }
01912
01913 dct_linesize = linesize << s->interlaced_dct;
01914 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
01915
01916 if(readable){
01917 dest_y= s->dest[0];
01918 dest_cb= s->dest[1];
01919 dest_cr= s->dest[2];
01920 }else{
01921 dest_y = s->b_scratchpad;
01922 dest_cb= s->b_scratchpad+16*linesize;
01923 dest_cr= s->b_scratchpad+32*linesize;
01924 }
01925
01926 if (!s->mb_intra) {
01927
01928
01929 if(!s->encoding){
01930 if(lowres_flag){
01931 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
01932
01933 if (s->mv_dir & MV_DIR_FORWARD) {
01934 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
01935 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
01936 }
01937 if (s->mv_dir & MV_DIR_BACKWARD) {
01938 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
01939 }
01940 }else{
01941 op_qpix= s->me.qpel_put;
01942 if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
01943 op_pix = s->dsp.put_pixels_tab;
01944 }else{
01945 op_pix = s->dsp.put_no_rnd_pixels_tab;
01946 }
01947 if (s->mv_dir & MV_DIR_FORWARD) {
01948 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
01949 op_pix = s->dsp.avg_pixels_tab;
01950 op_qpix= s->me.qpel_avg;
01951 }
01952 if (s->mv_dir & MV_DIR_BACKWARD) {
01953 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
01954 }
01955 }
01956 }
01957
01958
01959 if(s->hurry_up>1) goto skip_idct;
01960 if(s->avctx->skip_idct){
01961 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
01962 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
01963 || s->avctx->skip_idct >= AVDISCARD_ALL)
01964 goto skip_idct;
01965 }
01966
01967
01968 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
01969 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
01970 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
01971 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
01972 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
01973 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
01974
01975 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01976 if (s->chroma_y_shift){
01977 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
01978 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
01979 }else{
01980 dct_linesize >>= 1;
01981 dct_offset >>=1;
01982 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
01983 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
01984 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
01985 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
01986 }
01987 }
01988 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
01989 add_dct(s, block[0], 0, dest_y , dct_linesize);
01990 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
01991 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
01992 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
01993
01994 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
01995 if(s->chroma_y_shift){
01996 add_dct(s, block[4], 4, dest_cb, uvlinesize);
01997 add_dct(s, block[5], 5, dest_cr, uvlinesize);
01998 }else{
01999
02000 dct_linesize = uvlinesize << s->interlaced_dct;
02001 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02002
02003 add_dct(s, block[4], 4, dest_cb, dct_linesize);
02004 add_dct(s, block[5], 5, dest_cr, dct_linesize);
02005 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
02006 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
02007 if(!s->chroma_x_shift){
02008 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
02009 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
02010 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
02011 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
02012 }
02013 }
02014 }
02015 }
02016 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
02017 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
02018 }
02019 } else {
02020
02021 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
02022 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
02023 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
02024 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
02025 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
02026
02027 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02028 if(s->chroma_y_shift){
02029 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
02030 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
02031 }else{
02032 dct_offset >>=1;
02033 dct_linesize >>=1;
02034 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
02035 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
02036 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
02037 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
02038 }
02039 }
02040 }else{
02041 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
02042 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
02043 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
02044 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
02045
02046 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
02047 if(s->chroma_y_shift){
02048 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
02049 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
02050 }else{
02051
02052 dct_linesize = uvlinesize << s->interlaced_dct;
02053 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
02054
02055 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
02056 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
02057 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
02058 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
02059 if(!s->chroma_x_shift){
02060 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
02061 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
02062 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
02063 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
02064 }
02065 }
02066 }
02067 }
02068 }
02069 skip_idct:
02070 if(!readable){
02071 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
02072 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
02073 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
02074 }
02075 }
02076 }
02077
02078 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
02079 #if !CONFIG_SMALL
02080 if(s->out_format == FMT_MPEG1) {
02081 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
02082 else MPV_decode_mb_internal(s, block, 0, 1);
02083 } else
02084 #endif
02085 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
02086 else MPV_decode_mb_internal(s, block, 0, 0);
02087 }
02088
02093 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
02094 if (s->avctx->draw_horiz_band) {
02095 AVFrame *src;
02096 const int field_pic= s->picture_structure != PICT_FRAME;
02097 int offset[4];
02098
02099 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
02100
02101 if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
02102 h <<= 1;
02103 y <<= 1;
02104 if(s->first_field) return;
02105 }
02106
02107 if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
02108 src= (AVFrame*)s->current_picture_ptr;
02109 else if(s->last_picture_ptr)
02110 src= (AVFrame*)s->last_picture_ptr;
02111 else
02112 return;
02113
02114 if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
02115 offset[0]=
02116 offset[1]=
02117 offset[2]=
02118 offset[3]= 0;
02119 }else{
02120 offset[0]= y * s->linesize;
02121 offset[1]=
02122 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
02123 offset[3]= 0;
02124 }
02125
02126 emms_c();
02127
02128 s->avctx->draw_horiz_band(s->avctx, src, offset,
02129 y, s->picture_structure, h);
02130 }
02131 }
02132
02133 void ff_init_block_index(MpegEncContext *s){
02134 const int linesize= s->current_picture.linesize[0];
02135 const int uvlinesize= s->current_picture.linesize[1];
02136 const int mb_size= 4 - s->avctx->lowres;
02137
02138 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
02139 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
02140 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
02141 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
02142 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02143 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
02144
02145
02146 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
02147 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02148 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
02149
02150 if(!(s->pict_type==FF_B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
02151 {
02152 if(s->picture_structure==PICT_FRAME){
02153 s->dest[0] += s->mb_y * linesize << mb_size;
02154 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02155 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
02156 }else{
02157 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
02158 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02159 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
02160 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
02161 }
02162 }
02163 }
02164
02165 void ff_mpeg_flush(AVCodecContext *avctx){
02166 int i;
02167 MpegEncContext *s = avctx->priv_data;
02168
02169 if(s==NULL || s->picture==NULL)
02170 return;
02171
02172 for(i=0; i<MAX_PICTURE_COUNT; i++){
02173 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
02174 || s->picture[i].type == FF_BUFFER_TYPE_USER))
02175 free_frame_buffer(s, &s->picture[i]);
02176 }
02177 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
02178
02179 s->mb_x= s->mb_y= 0;
02180 s->closed_gop= 0;
02181
02182 s->parse_context.state= -1;
02183 s->parse_context.frame_start_found= 0;
02184 s->parse_context.overread= 0;
02185 s->parse_context.overread_index= 0;
02186 s->parse_context.index= 0;
02187 s->parse_context.last_index= 0;
02188 s->bitstream_buffer_size=0;
02189 s->pp_time=0;
02190 }
02191
02192 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
02193 DCTELEM *block, int n, int qscale)
02194 {
02195 int i, level, nCoeffs;
02196 const uint16_t *quant_matrix;
02197
02198 nCoeffs= s->block_last_index[n];
02199
02200 if (n < 4)
02201 block[0] = block[0] * s->y_dc_scale;
02202 else
02203 block[0] = block[0] * s->c_dc_scale;
02204
02205 quant_matrix = s->intra_matrix;
02206 for(i=1;i<=nCoeffs;i++) {
02207 int j= s->intra_scantable.permutated[i];
02208 level = block[j];
02209 if (level) {
02210 if (level < 0) {
02211 level = -level;
02212 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02213 level = (level - 1) | 1;
02214 level = -level;
02215 } else {
02216 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02217 level = (level - 1) | 1;
02218 }
02219 block[j] = level;
02220 }
02221 }
02222 }
02223
02224 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
02225 DCTELEM *block, int n, int qscale)
02226 {
02227 int i, level, nCoeffs;
02228 const uint16_t *quant_matrix;
02229
02230 nCoeffs= s->block_last_index[n];
02231
02232 quant_matrix = s->inter_matrix;
02233 for(i=0; i<=nCoeffs; i++) {
02234 int j= s->intra_scantable.permutated[i];
02235 level = block[j];
02236 if (level) {
02237 if (level < 0) {
02238 level = -level;
02239 level = (((level << 1) + 1) * qscale *
02240 ((int) (quant_matrix[j]))) >> 4;
02241 level = (level - 1) | 1;
02242 level = -level;
02243 } else {
02244 level = (((level << 1) + 1) * qscale *
02245 ((int) (quant_matrix[j]))) >> 4;
02246 level = (level - 1) | 1;
02247 }
02248 block[j] = level;
02249 }
02250 }
02251 }
02252
02253 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
02254 DCTELEM *block, int n, int qscale)
02255 {
02256 int i, level, nCoeffs;
02257 const uint16_t *quant_matrix;
02258
02259 if(s->alternate_scan) nCoeffs= 63;
02260 else nCoeffs= s->block_last_index[n];
02261
02262 if (n < 4)
02263 block[0] = block[0] * s->y_dc_scale;
02264 else
02265 block[0] = block[0] * s->c_dc_scale;
02266 quant_matrix = s->intra_matrix;
02267 for(i=1;i<=nCoeffs;i++) {
02268 int j= s->intra_scantable.permutated[i];
02269 level = block[j];
02270 if (level) {
02271 if (level < 0) {
02272 level = -level;
02273 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02274 level = -level;
02275 } else {
02276 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02277 }
02278 block[j] = level;
02279 }
02280 }
02281 }
02282
02283 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
02284 DCTELEM *block, int n, int qscale)
02285 {
02286 int i, level, nCoeffs;
02287 const uint16_t *quant_matrix;
02288 int sum=-1;
02289
02290 if(s->alternate_scan) nCoeffs= 63;
02291 else nCoeffs= s->block_last_index[n];
02292
02293 if (n < 4)
02294 block[0] = block[0] * s->y_dc_scale;
02295 else
02296 block[0] = block[0] * s->c_dc_scale;
02297 quant_matrix = s->intra_matrix;
02298 for(i=1;i<=nCoeffs;i++) {
02299 int j= s->intra_scantable.permutated[i];
02300 level = block[j];
02301 if (level) {
02302 if (level < 0) {
02303 level = -level;
02304 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02305 level = -level;
02306 } else {
02307 level = (int)(level * qscale * quant_matrix[j]) >> 3;
02308 }
02309 block[j] = level;
02310 sum+=level;
02311 }
02312 }
02313 block[63]^=sum&1;
02314 }
02315
02316 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
02317 DCTELEM *block, int n, int qscale)
02318 {
02319 int i, level, nCoeffs;
02320 const uint16_t *quant_matrix;
02321 int sum=-1;
02322
02323 if(s->alternate_scan) nCoeffs= 63;
02324 else nCoeffs= s->block_last_index[n];
02325
02326 quant_matrix = s->inter_matrix;
02327 for(i=0; i<=nCoeffs; i++) {
02328 int j= s->intra_scantable.permutated[i];
02329 level = block[j];
02330 if (level) {
02331 if (level < 0) {
02332 level = -level;
02333 level = (((level << 1) + 1) * qscale *
02334 ((int) (quant_matrix[j]))) >> 4;
02335 level = -level;
02336 } else {
02337 level = (((level << 1) + 1) * qscale *
02338 ((int) (quant_matrix[j]))) >> 4;
02339 }
02340 block[j] = level;
02341 sum+=level;
02342 }
02343 }
02344 block[63]^=sum&1;
02345 }
02346
02347 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
02348 DCTELEM *block, int n, int qscale)
02349 {
02350 int i, level, qmul, qadd;
02351 int nCoeffs;
02352
02353 assert(s->block_last_index[n]>=0);
02354
02355 qmul = qscale << 1;
02356
02357 if (!s->h263_aic) {
02358 if (n < 4)
02359 block[0] = block[0] * s->y_dc_scale;
02360 else
02361 block[0] = block[0] * s->c_dc_scale;
02362 qadd = (qscale - 1) | 1;
02363 }else{
02364 qadd = 0;
02365 }
02366 if(s->ac_pred)
02367 nCoeffs=63;
02368 else
02369 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02370
02371 for(i=1; i<=nCoeffs; i++) {
02372 level = block[i];
02373 if (level) {
02374 if (level < 0) {
02375 level = level * qmul - qadd;
02376 } else {
02377 level = level * qmul + qadd;
02378 }
02379 block[i] = level;
02380 }
02381 }
02382 }
02383
02384 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
02385 DCTELEM *block, int n, int qscale)
02386 {
02387 int i, level, qmul, qadd;
02388 int nCoeffs;
02389
02390 assert(s->block_last_index[n]>=0);
02391
02392 qadd = (qscale - 1) | 1;
02393 qmul = qscale << 1;
02394
02395 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
02396
02397 for(i=0; i<=nCoeffs; i++) {
02398 level = block[i];
02399 if (level) {
02400 if (level < 0) {
02401 level = level * qmul - qadd;
02402 } else {
02403 level = level * qmul + qadd;
02404 }
02405 block[i] = level;
02406 }
02407 }
02408 }
02409
02413 void ff_set_qscale(MpegEncContext * s, int qscale)
02414 {
02415 if (qscale < 1)
02416 qscale = 1;
02417 else if (qscale > 31)
02418 qscale = 31;
02419
02420 s->qscale = qscale;
02421 s->chroma_qscale= s->chroma_qscale_table[qscale];
02422
02423 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
02424 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
02425 }