62 #define QUANT_BIAS_SHIFT 8
64 #define QMAT_SHIFT_MMX 16
82 uint16_t (*qmat16)[2][64],
83 const uint16_t *quant_matrix,
84 int bias, int qmin, int qmax, int intra)
90 for (qscale = qmin; qscale <= qmax; qscale++) {
97 for (i = 0; i < 64; i++) {
99 int64_t den = (int64_t) qscale * quant_matrix[j];
109 for (i = 0; i < 64; i++) {
111 int64_t den = ff_aanscales[i] * (int64_t) qscale * quant_matrix[j];
121 for (i = 0; i < 64; i++) {
123 int64_t den = (int64_t) qscale * quant_matrix[j];
134 if (qmat16[qscale][0][i] == 0 ||
135 qmat16[qscale][0][i] == 128 * 256)
136 qmat16[qscale][0][i] = 128 * 256 - 1;
139 qmat16[qscale][0][i]);
143 for (i = intra; i < 64; i++) {
148 while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
155 "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
176 for (i = 0; i < 64; i++) {
191 for (i = 0; i < s->mb_num; i++) {
202 #define COPY(a) dst->a= src->a
227 for (i = -16; i < 16; i++) {
258 int i, ret, format_supported;
267 "only YUV420 and YUV422 are supported\n");
273 format_supported = 0;
282 format_supported = 1;
288 format_supported = 1;
290 if (!format_supported) {
324 "keyframe interval too large!, reducing it from %d to %d\n",
353 "intra dc precision must be positive, note some applications use"
354 " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
421 av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
427 "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
444 "impossible bitrate constraints, this will fail\n");
469 "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
470 "specified vbv buffer is too large for the given bitrate!\n");
482 "OBMC is only supported with simple mb decision\n");
500 "max b frames must be 0 or positive for mpegvideo based encoders\n");
510 "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
518 (avctx->width > 2048 ||
525 ((avctx->width &3) ||
532 (avctx->width > 4095 ||
539 (avctx->width > 16383 ||
540 avctx->height > 16383 )) {
541 av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
576 "mpeg2 style quantization not supported by codec\n");
594 "closed gop with scene change detection are not supported yet, "
595 "set threshold to 1000000000\n");
602 "low delay forcing is only available for mpeg2\n");
607 "b frames cannot be used with low delay\n");
613 if (avctx->qmax > 12) {
615 "non linear quant only supports qmax <= 12 currently\n");
627 "multi threaded encoding not supported by codec\n");
633 "automatic thread number detection not supported by codec, "
651 "notice: b_frame_strategy only affects the first pass\n");
674 av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
688 "timebase %d/%d not supported by MPEG 4 standard, "
689 "the maximum admitted value for the timebase denominator "
723 "The specified picture size of %dx%d is not valid for the "
724 "H.261 codec.\nValid sizes are 176x144, 352x288\n",
739 "The specified picture size of %dx%d is not valid for "
740 "the H.263 codec.\nValid sizes are 128x96, 176x144, "
741 "352x288, 704x576, and 1408x1152. "
857 (MAX_RUN + 1) * 2 * sizeof(int), fail);
874 2 * 64 * sizeof(uint16_t), fail);
899 for (i = 0; i < 64; i++) {
934 #if FF_API_ERROR_RATE
941 #if FF_API_NORMALIZE_AQP
1046 for (y = 0; y < 16; y++) {
1047 for (x = 0; x < 16; x++) {
1048 acc += FFABS(src[x + y * stride] - ref);
1064 for (y = 0; y < h; y += 16) {
1065 for (x = 0; x < w; x += 16) {
1070 int sae = get_sae(src + offset, mean, stride);
1072 acc += sae + 500 < sad;
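get_sae and get_intra_count above sum, for each 16x16 block, the absolute difference from the block mean (SAE) and compare it against the motion-compensated SAD; blocks where sae + 500 < sad are counted towards the intra/scene-change decision. A hedged, self-contained version of the SAE helper (plain abs() instead of FFABS, illustrative only):

#include <stdint.h>
#include <stdlib.h>

/* Sum of absolute differences of a 16x16 block against a constant
 * reference value (typically the block mean). */
static int sae_16x16(const uint8_t *src, int ref, int stride)
{
    int acc = 0;
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            acc += abs(src[x + y * stride] - ref);
    return acc;
}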
1083 int i, display_picture_number = 0, ret;
1098 "Invalid pts (%"PRId64
") <= last (%"PRId64
")\n",
1103 if (!s->low_delay && display_picture_number == 1)
1112 "Warning: AVFrame.pts=? trying to guess (%"PRId64
")\n",
1115 pts = display_picture_number;
1121 if (!pic_arg->buf[0] ||
1159 int h_chroma_shift, v_chroma_shift;
1164 for (i = 0; i < 3; i++) {
1165 int src_stride = pic_arg->linesize[i];
1167 int h_shift = i ? h_chroma_shift : 0;
1168 int v_shift = i ? v_chroma_shift : 0;
1169 int w = s->width >> h_shift;
1170 int h = s->height >> v_shift;
1183 if (src_stride == dst_stride)
1184 memcpy(dst, src, src_stride * h);
1189 memcpy(dst2, src, w);
1225 int64_t score64 = 0;
1227 for (plane = 0; plane < 3; plane++) {
1229 const int bw = plane ? 1 : 2;
1230 for (y = 0; y < s->mb_height * bw; y++) {
1231 for (x = 0; x < s->mb_width * bw; x++) {
1232 int off = p->shared ? 0 : 16;
1238 case 0: score = FFMAX(score, v); break;
1239 case 1: score += FFABS(v); break;
1240 case 2: score64 += v * (int64_t)v; break;
1241 case 3: score64 += FFABS(v * (int64_t)v * v); break;
1242 case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
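The switch above is the core of skip_check: every pixel difference v between the candidate frame and its reference is folded into a score according to the configured frame_skip_cmp metric (maximum, sum of absolute differences, sum of squares, and higher-order variants). A reduced sketch of that dispatch, using a local enum instead of FFmpeg's option values (illustrative only):

#include <stdint.h>
#include <stdlib.h>

enum skip_metric { SKIP_MAX, SKIP_SAD, SKIP_SSE };

/* Fold one pixel difference into the running frame-skip score. */
static void skip_accumulate(enum skip_metric m, int v,
                            int *score, int64_t *score64)
{
    switch (m) {
    case SKIP_MAX: if (abs(v) > *score) *score = abs(v); break;
    case SKIP_SAD: *score   += abs(v);                   break;
    case SKIP_SSE: *score64 += (int64_t)v * v;           break;
    }
}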
1265 int ret, got_output;
1282 int i, j, out_size, p_lambda, b_lambda, lambda2;
1283 int64_t best_rd = INT64_MAX;
1284 int best_b_count = -1;
1294 b_lambda = p_lambda;
1319 pre_input = *pre_input_ptr;
1320 memcpy(data, pre_input_ptr->f->data, sizeof(data));
1322 if (!pre_input.shared && i) {
1390 return best_b_count;
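estimate_best_b_count (ending in the return above) trial-encodes the queued input frames with every candidate number of consecutive B-frames and keeps the count with the lowest rate-distortion cost. The skeleton of that search, with encode_cost() as a hypothetical stand-in for the trial encode (not an FFmpeg API):

#include <stdint.h>

/* Return the B-frame run length (0..max_b) with the smallest trial cost.
 * encode_cost(b) is a placeholder for "encode the queue with b B-frames
 * and return rate + lambda * distortion". */
static int best_b_count(int max_b, int64_t (*encode_cost)(int b))
{
    int64_t best_rd = INT64_MAX;
    int     best    = 0;
    for (int b = 0; b <= max_b; b++) {
        int64_t rd = encode_cost(b);
        if (rd < best_rd) {
            best_rd = rd;
            best    = b;
        }
    }
    return best;
}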
1463 b_frames = FFMAX(0, i - 1);
1466 for (i = 0; i < b_frames + 1; i++) {
1478 for (i = b_frames - 1; i >= 0; i--) {
1486 "warning, too many b frames in a row\n");
1509 for (i = 0; i < b_frames; i++) {
1555 for (i = 0; i < 4; i++) {
1615 for (intra = 0; intra < 2; intra++) {
1617 for (i = 0; i < 64; i++) {
1623 for (i = 0; i < 64; i++) {
1674 for (i = 0; i < 4; i++) {
1705 const AVFrame *pic_arg, int *got_packet)
1708 int i, stuffing_count, ret;
1735 for (i = 0; i < context_count; i++) {
1752 if (growing_buffer) {
1803 for (i = 0; i < context_count; i++) {
1817 for (i = 0; i < 4; i++) {
1833 if (stuffing_count) {
1835 stuffing_count + 50) {
1843 while (stuffing_count--) {
1850 stuffing_count -= 4;
1851 while (stuffing_count--) {
1877 "Internal error, negative bits\n");
1885 vbv_delay = FFMAX(vbv_delay, min_delay);
1925 *got_packet = !!pkt->size;
1930 int n, int threshold)
1932 static const char tab[64] = {
1933 3, 2, 2, 1, 1, 1, 1, 1,
1934 1, 1, 1, 1, 1, 1, 1, 1,
1935 1, 1, 1, 1, 1, 1, 1, 1,
1936 0, 0, 0, 0, 0, 0, 0, 0,
1937 0, 0, 0, 0, 0, 0, 0, 0,
1938 0, 0, 0, 0, 0, 0, 0, 0,
1939 0, 0, 0, 0, 0, 0, 0, 0,
1940 0, 0, 0, 0, 0, 0, 0, 0
1949 if (threshold < 0) {
1951 threshold = -threshold;
1956 if (last_index <= skip_dc - 1)
1959 for (i = 0; i <= last_index; i++) {
1963 if (skip_dc && i == 0)
1967 } else if (level > 1) {
1973 if (score >= threshold)
1975 for (i = skip_dc; i <= last_index; i++) {
1998 for (; i <= last_index; i++) {
2000 int level = block[j];
2002 if (level > maxlevel) {
2005 } else if (level < minlevel) {
2015 "warning, clipping %d dct coefficients to %d..%d\n",
2016 overflow, minlevel, maxlevel);
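clip_coeffs (the fragments above) clamps every quantized coefficient into the range the entropy coder can represent, [minlevel, maxlevel], counts how many had to be clipped, and emits the warning shown. A compact sketch of the same clamp, simplified to iterate the block directly rather than in scan order:

#include <stdint.h>
#include <stdio.h>

/* Clamp block[0..last_index] into [minlevel, maxlevel]; return the clip count. */
static int clip_block(int16_t *block, int last_index, int minlevel, int maxlevel)
{
    int overflow = 0;
    for (int i = 0; i <= last_index; i++) {
        if (block[i] > maxlevel) {
            block[i] = maxlevel;
            overflow++;
        } else if (block[i] < minlevel) {
            block[i] = minlevel;
            overflow++;
        }
    }
    if (overflow)
        fprintf(stderr, "warning, clipping %d dct coefficients to %d..%d\n",
                overflow, minlevel, maxlevel);
    return overflow;
}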
2023 for (y = 0; y < 8; y++) {
2024 for (x = 0; x < 8; x++) {
2030 for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2031 for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2032 int v = ptr[x2 + y2 * stride];
2038 weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
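get_visual_weight computes, for each of the 64 pixels of a block, a weight proportional to the standard deviation of its 3x3 neighbourhood (36 * sqrt(count*sqr - sum*sum) / count); the noise-shaping quantizer later uses these weights to hide distortion in busy areas. An equivalent standalone sketch using libm instead of ff_sqrt:

#include <math.h>
#include <stdint.h>

/* Per-pixel activity of an 8x8 block: scaled standard deviation of the
 * 3x3 neighbourhood clipped to the block borders. */
static void visual_weight(int16_t weight[64], const uint8_t *ptr, int stride)
{
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            for (int y2 = y > 0 ? y - 1 : 0; y2 < (y < 6 ? y + 2 : 8); y2++)
                for (int x2 = x > 0 ? x - 1 : 0; x2 < (x < 6 ? x + 2 : 8); x2++) {
                    int v = ptr[x2 + y2 * stride];
                    sum   += v;
                    sqr   += v * v;
                    count++;
                }
            weight[x + 8 * y] = (int16_t)
                (36.0 * sqrt((double)count * sqr - (double)sum * sum) / count);
        }
}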
2044 int motion_x, int motion_y,
2045 int mb_block_height,
2050 int16_t orig[12][64];
2057 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2058 ptrdiff_t wrap_y, wrap_c;
2060 for (i = 0; i < mb_block_count; i++)
2064 const int last_qp = s->qscale;
2065 const int mb_xy = mb_x + mb_y * s->mb_stride;
2096 (mb_y * 16 * wrap_y) + mb_x * 16;
2098 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2100 (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2108 16, 16, mb_x * 16, mb_y * 16,
2113 mb_block_width, mb_block_height,
2114 mb_x * mb_block_width, mb_y * mb_block_height,
2116 ptr_cb = ebuf + 16 * wrap_y;
2119 mb_block_width, mb_block_height,
2120 mb_x * mb_block_width, mb_y * mb_block_height,
2122 ptr_cr = ebuf + 16 * wrap_y + 16;
2127 int progressive_score, interlaced_score;
2132 NULL, wrap_y, 8) - 400;
2134 if (progressive_score > 0) {
2136 NULL, wrap_y * 2, 8) +
2138 NULL, wrap_y * 2, 8);
2139 if (progressive_score > interlaced_score) {
2142 dct_offset = wrap_y;
2143 uv_dct_offset = wrap_c;
2178 uint8_t *dest_y, *dest_cb, *dest_cr;
2180 dest_y = s->dest[0];
2181 dest_cb = s->dest[1];
2182 dest_cr = s->dest[2];
2206 int progressive_score, interlaced_score;
2209 progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2215 progressive_score -= 400;
2217 if (progressive_score > 0) {
2224 if (progressive_score > interlaced_score) {
2227 dct_offset = wrap_y;
2228 uv_dct_offset = wrap_c;
2239 dest_y + dct_offset, wrap_y);
2241 dest_y + dct_offset + 8, wrap_y);
2251 dest_cb + uv_dct_offset, wrap_c);
2253 dest_cr + uv_dct_offset, wrap_c);
2264 if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2265 wrap_y, 8) < 20 * s->qscale)
2267 if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2268 wrap_y, 8) < 20 * s->qscale)
2276 dest_cb + uv_dct_offset,
2277 wrap_c, 8) < 20 * s->qscale)
2280 dest_cr + uv_dct_offset,
2281 wrap_c, 8) < 20 * s->qscale)
2308 memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2314 for (i = 0; i < mb_block_count; i++) {
2329 for (i = 0; i < mb_block_count; i++) {
2339 for (i = 0; i < 4; i++)
2342 for (i = 4; i < mb_block_count; i++)
2346 for (i = 0; i < mb_block_count; i++) {
2359 for (i=6; i<12; i++) {
2368 for (i = 0; i < mb_block_count; i++) {
2371 for (j = 63; j > 0; j--) {
2461 memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2499 int *dmin, int *next_block, int motion_x, int motion_y)
2507 s->pb= pb[*next_block];
2509 s->pb2 = pb2 [*next_block];
2510 s->tex_pb= tex_pb[*next_block];
2514 memcpy(dest_backup, s->dest, sizeof(s->dest));
2537 memcpy(s->dest, dest_backup, sizeof(s->dest));
2555 else if(w==8 && h==8)
2645 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2653 (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2690 bytestream_put_le32(&ptr, offset);
2691 bytestream_put_byte(&ptr, s->qscale);
2692 bytestream_put_byte(&ptr, gobn);
2693 bytestream_put_le16(&ptr, mba);
2694 bytestream_put_byte(&ptr, pred_x);
2695 bytestream_put_byte(&ptr, pred_y);
2697 bytestream_put_byte(&ptr, 0);
2698 bytestream_put_byte(&ptr, 0);
2794 for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2811 int new_buffer_size = 0;
2848 int current_packet_size, is_gob_start;
2854 if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
2863 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2868 if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
2888 current_packet_size=0;
2946 int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
2953 backup_s.pb2= s->pb2;
2954 backup_s.tex_pb= s->tex_pb;
2963 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
2964 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2975 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
2976 &dmin, &next_block, 0, 0);
2984 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
2985 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
2995 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
2996 &dmin, &next_block, 0, 0);
3004 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3005 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3013 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3014 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3024 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3025 &dmin, &next_block, 0, 0);
3036 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3037 &dmin, &next_block, 0, 0);
3048 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3049 &dmin, &next_block, 0, 0);
3055 for(dir=0; dir<2; dir++){
3062 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3063 &dmin, &next_block, 0, 0);
3071 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3072 &dmin, &next_block, 0, 0);
3083 const int last_qp= backup_s.qscale;
3087 static const int dquant_tab[4]={-1,1,-2,2};
3096 s->mv[0][0][0] = best_s.mv[0][0][0];
3097 s->mv[0][0][1] = best_s.mv[0][0][1];
3098 s->mv[1][0][0] = best_s.mv[1][0][0];
3099 s->mv[1][0][1] = best_s.mv[1][0][1];
3102 for(; qpi<4; qpi++){
3103 int dquant= dquant_tab[qpi];
3115 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
3116 &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3132 backup_s.dquant = 0;
3136 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3137 &dmin, &next_block, mx, my);
3140 backup_s.dquant = 0;
3144 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3145 &dmin, &next_block, 0, 0);
3153 memcpy(s->mv, best_s.mv, sizeof(s->mv));
3174 encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER , pb, pb2, tex_pb,
3175 &dmin, &next_block, mx, my);
3193 s->pb2= backup_s.pb2;
3197 avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3198 s->tex_pb= backup_s.tex_pb;
3215 int motion_x = 0, motion_y = 0;
3223 motion_x= s->mv[0][0][0] = 0;
3224 motion_y= s->mv[0][0][1] = 0;
3311 for(dir=0; dir<2; dir++){
3385 #define MERGE(field) dst->field += src->field; src->field=0
3412 for(i=0; i<64; i++){
3526 for(i=1; i<context_count; i++){
3556 for(i=1; i<context_count; i++){
3569 av_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3613 for(dir=0; dir<2; dir++){
3662 static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3663 static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3731 for(i=1; i<context_count; i++){
3735 for(i=1; i<context_count; i++){
3748 for(i=0; i<64; i++){
3749 int level= block[i];
3755 if(level<0) level=0;
3759 if(level>0) level=0;
3768 int qscale, int *overflow){
3770 const uint16_t *matrix;
3774 unsigned int threshold1, threshold2;
3786 int coeff_count[64];
3787 int qmul, qadd, start_i, last_non_zero, i, dc;
3798 qadd= ((qscale-1)|1)*8;
3815 block[0] = (block[0] + (q >> 1)) / q;
3841 threshold2= (threshold1<<1);
3843 for(i=63; i>=start_i; i--) {
3844 const int j = scantable[i];
3845 int level = block[j] * qmat[j];
3847 if(((unsigned)(level+threshold1))>threshold2){
3853 for(i=start_i; i<=last_non_zero; i++) {
3854 const int j = scantable[i];
3855 int level = block[j] * qmat[j];
3859 if(((unsigned)(level+threshold1))>threshold2){
3863 coeff[1][i]= level-1;
3867 coeff[0][i]= -level;
3868 coeff[1][i]= -level+1;
3871 coeff_count[i]= FFMIN(level, 2);
3875 coeff[0][i]= (level>>31)|1;
3882 if(last_non_zero < start_i){
3883 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
3884 return last_non_zero;
3887 score_tab[start_i]= 0;
3888 survivor[0]= start_i;
3891 for(i=start_i; i<=last_non_zero; i++){
3892 int level_index, j, zero_distortion;
3893 int dct_coeff= FFABS(block[ scantable[i] ]);
3894 int best_score=256*256*256*120;
3898 zero_distortion= dct_coeff*dct_coeff;
3900 for(level_index=0; level_index < coeff_count[i]; level_index++){
3902 int level= coeff[level_index][i];
3903 const int alevel= FFABS(level);
3909 unquant_coeff= alevel*qmul + qadd;
3912 unquant_coeff = alevel * matrix[j] * 8;
3916 unquant_coeff = (int)( alevel * qscale * matrix[j]) >> 3;
3917 unquant_coeff = (unquant_coeff - 1) | 1;
3919 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[j])) >> 4;
3920 unquant_coeff = (unquant_coeff - 1) | 1;
3925 distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
3927 if((level&(~127)) == 0){
3928 for(j=survivor_count-1; j>=0; j--){
3929 int run= i - survivor[j];
3931 score += score_tab[i-run];
3933 if(score < best_score){
3936 level_tab[i+1]= level-64;
3941 for(j=survivor_count-1; j>=0; j--){
3942 int run= i - survivor[j];
3944 score += score_tab[i-run];
3945 if(score < last_score){
3948 last_level= level-64;
3954 distortion += esc_length*lambda;
3955 for(j=survivor_count-1; j>=0; j--){
3956 int run= i - survivor[j];
3957 int score= distortion + score_tab[i-run];
3959 if(score < best_score){
3962 level_tab[i+1]= level-64;
3967 for(j=survivor_count-1; j>=0; j--){
3968 int run= i - survivor[j];
3969 int score= distortion + score_tab[i-run];
3970 if(score < last_score){
3973 last_level= level-64;
3981 score_tab[i+1]= best_score;
3984 if(last_non_zero <= 27){
3985 for(; survivor_count; survivor_count--){
3986 if(score_tab[ survivor[survivor_count-1] ] <= best_score)
3990 for(; survivor_count; survivor_count--){
3991 if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
3996 survivor[ survivor_count++ ]= i+1;
4000 last_score= 256*256*256*120;
4001 for(i= survivor[0]; i<=last_non_zero + 1; i++){
4002 int score= score_tab[i];
4003 if(i) score += lambda*2;
4005 if(score < last_score){
4008 last_level= level_tab[i];
4009 last_run= run_tab[i];
4016 dc= FFABS(block[0]);
4017 last_non_zero= last_i - 1;
4018 memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
4020 if(last_non_zero < start_i)
4021 return last_non_zero;
4023 if(last_non_zero == 0 && start_i == 0){
4025 int best_score= dc * dc;
4027 for(i=0; i<coeff_count[0]; i++){
4028 int level= coeff[i][0];
4029 int alevel= FFABS(level);
4030 int unquant_coeff, score, distortion;
4033 unquant_coeff= (alevel*qmul + qadd)>>3;
4035 unquant_coeff = ((( alevel << 1) + 1) * qscale * ((int) matrix[0])) >> 4;
4036 unquant_coeff = (unquant_coeff - 1) | 1;
4038 unquant_coeff = (unquant_coeff + 4) >> 3;
4039 unquant_coeff<<= 3 + 3;
4041 distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
4044 else score= distortion + esc_length*lambda;
4046 if(score < best_score){
4048 best_level= level - 64;
4051 block[0]= best_level;
4053 if(best_level == 0) return -1;
4054 else return last_non_zero;
4060 block[ perm_scantable[last_non_zero] ]= last_level;
4063 for(; i>start_i; i -= run_tab[i] + 1){
4064 block[ perm_scantable[i-1] ]= level_tab[i];
4067 return last_non_zero;
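dct_quantize_trellis_c, which the fragments above come from, is a dynamic program over scan positions: score_tab[i] holds the best rate-distortion cost of everything coded before position i, each position considers its surviving predecessors (the survivor[] list prunes hopeless ones), and run_tab/level_tab are filled so the winning run/level chain can be reconstructed by the backwards walk at the end. A toy version of the same recurrence, without the survivor pruning or a real entropy coder (coef_cost is a placeholder, not an FFmpeg function):

#include <stdint.h>

/* score_tab[i+1] = min over predecessors j of score_tab[j] + coef_cost(i-j, i).
 * coef_cost(run, i) stands in for "distortion of keeping coefficient i plus
 * lambda * bits to code (run, level)"; from[] records the chosen predecessor
 * for backtracking.  n <= 64 is assumed. */
static int64_t toy_trellis(int n, int64_t (*coef_cost)(int run, int i), int *from)
{
    int64_t score_tab[65];
    score_tab[0] = 0;
    for (int i = 0; i < n; i++) {
        int64_t best = INT64_MAX;
        for (int j = 0; j <= i; j++) {
            int64_t s = score_tab[j] + coef_cost(i - j, i);
            if (s < best) {
                best = s;
                from[i + 1] = j;
            }
        }
        score_tab[i + 1] = best;
    }
    return score_tab[n];
}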
4082 int perm_index= perm[index];
4083 if(i==0) s*= sqrt(0.5);
4084 if(j==0) s*= sqrt(0.5);
4085 basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
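build_basis (rejoined above) tabulates the 64 two-dimensional 8x8 DCT basis functions in the pixel domain, with the sqrt(0.5) factors on the DC row and column that keep the basis orthogonal; the refinement quantizer then uses the table to evaluate what changing a single coefficient does to the reconstruction. A standalone equivalent with no coefficient permutation and an assumed scale factor:

#include <math.h>
#include <stdint.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* basis[8*i + j][8*x + y]: DCT basis function (i,j) sampled at pixel (x,y),
 * scaled by an illustrative factor so the values fit in int16_t. */
static void build_dct_basis(int16_t basis[64][64])
{
    const double scale = 8192.0;                 /* assumption, not FFmpeg's value */
    for (int i = 0; i < 8; i++)
        for (int j = 0; j < 8; j++)
            for (int y = 0; y < 8; y++)
                for (int x = 0; x < 8; x++) {
                    double s = scale;
                    if (i == 0) s *= sqrt(0.5);  /* DC row/column normalization */
                    if (j == 0) s *= sqrt(0.5);
                    basis[8 * i + j][8 * x + y] = (int16_t)lrint(
                        s * cos((M_PI / 8.0) * i * (x + 0.5))
                          * cos((M_PI / 8.0) * j * (y + 0.5)));
                }
}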
4104 int qmul, qadd, start_i, last_non_zero, i, dc;
4108 int rle_index, run, q = 1, sum;
4111 static int after_last=0;
4112 static int to_zero=0;
4113 static int from_zero=0;
4116 static int messed_sign=0;
4119 if(basis[0][0] == 0)
4161 for(i=0; i<64; i++){
4168 for(i=0; i<64; i++){
4173 w= FFABS(weight[i]) + qns*one;
4174 w= 15 + (48*qns*one + w/2)/w;
4189 for(i=start_i; i<=last_non_zero; i++){
4190 int j= perm_scantable[i];
4191 const int level= block[j];
4195 if(level<0) coeff= qmul*level - qadd;
4196 else coeff= qmul*level + qadd;
4197 run_tab[rle_index++]= run;
4206 if(last_non_zero>0){
4217 int run2, best_unquant_change=0, analyze_gradient;
4223 if(analyze_gradient){
4227 for(i=0; i<64; i++){
4243 const int level= block[0];
4244 int change, old_coeff;
4250 for(change=-1; change<=1; change+=2){
4251 int new_level= level + change;
4252 int score, new_coeff;
4254 new_coeff= q*new_level;
4255 if(new_coeff >= 2048 || new_coeff < 0)
4259 new_coeff - old_coeff);
4260 if(score<best_score){
4263 best_change= change;
4264 best_unquant_change= new_coeff - old_coeff;
4271 run2= run_tab[rle_index++];
4275 for(i=start_i; i<64; i++){
4276 int j= perm_scantable[i];
4277 const int level= block[j];
4278 int change, old_coeff;
4284 if(level<0) old_coeff= qmul*level - qadd;
4285 else old_coeff= qmul*level + qadd;
4286 run2= run_tab[rle_index++];
4293 for(change=-1; change<=1; change+=2){
4294 int new_level= level + change;
4295 int score, new_coeff, unquant_change;
4302 if(new_level<0) new_coeff= qmul*new_level - qadd;
4303 else new_coeff= qmul*new_level + qadd;
4304 if(new_coeff >= 2048 || new_coeff <= -2048)
4309 if(level < 63 && level > -63){
4310 if(i < last_non_zero)
4320 if(analyze_gradient){
4321 int g= d1[ scantable[i] ];
4322 if(g && (g^new_level) >= 0)
4326 if(i < last_non_zero){
4327 int next_i= i + run2 + 1;
4328 int next_level= block[ perm_scantable[next_i] ] + 64;
4330 if(next_level&(~127))
4333 if(next_i < last_non_zero)
4353 if(i < last_non_zero){
4354 int next_i= i + run2 + 1;
4355 int next_level= block[ perm_scantable[next_i] ] + 64;
4357 if(next_level&(~127))
4360 if(next_i < last_non_zero)
4379 unquant_change= new_coeff - old_coeff;
4380 av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4384 if(score<best_score){
4387 best_change= change;
4388 best_unquant_change= unquant_change;
4392 prev_level= level + 64;
4393 if(prev_level&(~127))
4406 int j= perm_scantable[ best_coeff ];
4408 block[j] += best_change;
4410 if(best_coeff > last_non_zero){
4411 last_non_zero= best_coeff;
4419 if(block[j] - best_change){
4420 if(FFABS(block[j]) > FFABS(block[j] - best_change)){
4432 for(; last_non_zero>=start_i; last_non_zero--){
4433 if(block[perm_scantable[last_non_zero]])
4439 if(256*256*256*64 % count == 0){
4440 av_log(s->avctx, AV_LOG_DEBUG, "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n", after_last, to_zero, from_zero, raise, lower, messed_sign, s->mb_x, s->mb_y, s->picture_number);
4445 for(i=start_i; i<=last_non_zero; i++){
4446 int j= perm_scantable[i];
4447 const int level= block[j];
4450 run_tab[rle_index++]= run;
4463 if(last_non_zero>0){
4469 return last_non_zero;
4474 int qscale, int *overflow)
4476 int i, j, level, last_non_zero, q, start_i;
4481 unsigned int threshold1, threshold2;
4500 block[0] = (block[0] + (q >> 1)) / q;
4512 threshold2= (threshold1<<1);
4513 for(i=63;i>=start_i;i--) {
4515 level = block[j] * qmat[j];
4517 if(((unsigned)(level+threshold1))>threshold2){
4524 for(i=start_i; i<=last_non_zero; i++) {
4526 level = block[j] * qmat[j];
4530 if(((unsigned)(level+threshold1))>threshold2){
4548 scantable, last_non_zero);
4550 return last_non_zero;
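dct_quantize_c above is the plain dead-zone quantizer: it scans from the highest frequency down to find the last coefficient that survives the (unsigned)(level+threshold1) > threshold2 test, then quantizes the surviving range with the rounding bias. A self-contained sketch of that test and rounding, assuming bias has already been scaled to QMAT_SHIFT precision and qmat holds the multipliers from the convert-matrix sketch further up (not the FFmpeg code itself):

#include <stdint.h>

#define QMAT_SHIFT 22   /* same illustrative precision as above */

/* Dead-zone quantization in scan order; returns the index of the last nonzero
 * coefficient, or -1 if the block quantizes to all zeros.  Assumes |block[j]|
 * and qmat[j] are small enough that their product fits in a 32-bit int
 * (true for normal DCT output and matrix entries >= 8). */
static int deadzone_quantize(int16_t *block, const int32_t *qmat,
                             const uint8_t *scantable, int bias)
{
    const unsigned threshold1 = (1u << QMAT_SHIFT) - bias - 1;
    const unsigned threshold2 = threshold1 << 1;
    int last_non_zero = -1;

    for (int i = 0; i < 64; i++) {
        const int j     = scantable[i];
        const int level = block[j] * qmat[j];
        if ((unsigned)(level + threshold1) > threshold2) {   /* outside dead zone */
            if (level > 0)
                block[j] =   (level + bias) >> QMAT_SHIFT;
            else
                block[j] = -((bias - level) >> QMAT_SHIFT);
            last_non_zero = i;
        } else {
            block[j] = 0;
        }
    }
    return last_non_zero;
}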
4553 #define OFFSET(x) offsetof(MpegEncContext, x)
4554 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4558 { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4615 .name = "msmpeg4v2",
4624 .priv_class = &msmpeg4v2_class,
4639 .priv_class = &msmpeg4v3_class,
4654 .priv_class = &wmv1_class,