#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "vp8.h"
#include "vp8data.h"
#include "rectangle.h"

#if ARCH_ARM
#   include "arm/vp8.h"
#endif
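
/* Release all reference frames and free the per-macroblock buffers allocated
 * in update_dimensions(). Also used as the codec's flush() callback and on
 * close. */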
static void vp8_decode_flush(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0])
            avctx->release_buffer(avctx, &s->frames[i]);
    memset(s->framep, 0, sizeof(s->framep));

    av_freep(&s->macroblocks_base);
    av_freep(&s->filter_strength);
    av_freep(&s->intra4x4_pred_mode_top);
    av_freep(&s->top_nnz);
    av_freep(&s->edge_emu_buffer);
    av_freep(&s->top_border);
    av_freep(&s->segmentation_map);

    s->macroblocks = NULL;
}
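
/* (Re)allocate the per-macroblock buffers for the given coded dimensions.
 * Returns 0 on success or a negative AVERROR code. */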
static int update_dimensions(VP8Context *s, int width, int height)
{
    if (av_image_check_size(width, height, 0, s->avctx))
        return AVERROR_INVALIDDATA;

    vp8_decode_flush(s->avctx);

    avcodec_set_dimensions(s->avctx, width, height);

    s->mb_width  = (s->avctx->coded_width +15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    s->macroblocks_base       = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
    s->filter_strength        = av_mallocz(s->mb_width*sizeof(*s->filter_strength));
    s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4);
    s->top_nnz                = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
    s->top_border             = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
    s->segmentation_map       = av_mallocz(s->mb_width*s->mb_height);

    if (!s->macroblocks_base || !s->filter_strength || !s->intra4x4_pred_mode_top ||
        !s->top_nnz || !s->top_border || !s->segmentation_map)
        return AVERROR(ENOMEM);

    s->macroblocks = s->macroblocks_base + 1;

    return 0;
}

static void parse_segment_info(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i]   = vp8_rac_get_sint(c, 7);

        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
    }
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}

static void update_lf_deltas(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    for (i = 0; i < 4; i++)
        s->lf_delta.ref[i]  = vp8_rac_get_sint(c, 6);

    for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++)
        s->lf_delta.mode[i] = vp8_rac_get_sint(c, 6);
}
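
/* Parse the coefficient partition layout: read the partition count from the
 * header range coder and set up one range decoder per partition. Returns 0 on
 * success, or -1 if the partition sizes exceed the available data. */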
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
{
    const uint8_t *sizes = buf;
    int i;

    s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);

    buf      += 3*(s->num_coeff_partitions-1);
    buf_size -= 3*(s->num_coeff_partitions-1);
    if (buf_size < 0)
        return -1;

    for (i = 0; i < s->num_coeff_partitions-1; i++) {
        int size = AV_RL24(sizes + 3*i);
        if (buf_size - size < 0)
            return -1;

        ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
        buf      += size;
        buf_size -= size;
    }
    ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);

    return 0;
}

static void get_quants(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i, base_qi;

    int yac_qi     = vp8_rac_get_uint(c, 7);
    int ydc_delta  = vp8_rac_get_sint(c, 4);
    int y2dc_delta = vp8_rac_get_sint(c, 4);
    int y2ac_delta = vp8_rac_get_sint(c, 4);
    int uvdc_delta = vp8_rac_get_sint(c, 4);
    int uvac_delta = vp8_rac_get_sint(c, 4);

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            base_qi = s->segmentation.base_quant[i];
            if (!s->segmentation.absolute_vals)
                base_qi += yac_qi;
        } else
            base_qi = yac_qi;

        s->qmat[i].luma_qmul[0]    =       vp8_dc_qlookup[av_clip(base_qi + ydc_delta , 0, 127)];
        s->qmat[i].luma_qmul[1]    =       vp8_ac_qlookup[av_clip(base_qi             , 0, 127)];
        s->qmat[i].luma_dc_qmul[0] =   2 * vp8_dc_qlookup[av_clip(base_qi + y2dc_delta, 0, 127)];
        s->qmat[i].luma_dc_qmul[1] = 155 * vp8_ac_qlookup[av_clip(base_qi + y2ac_delta, 0, 127)] / 100;
        s->qmat[i].chroma_qmul[0]  =       vp8_dc_qlookup[av_clip(base_qi + uvdc_delta, 0, 127)];
        s->qmat[i].chroma_qmul[1]  =       vp8_ac_qlookup[av_clip(base_qi + uvac_delta, 0, 127)];

        s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
        s->qmat[i].chroma_qmul[0]  = FFMIN(s->qmat[i].chroma_qmul[0], 132);
    }
}
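
/**
 * Determine which buffer golden or altref should be updated with after
 * this frame.
 *
 * - If the update flag is set, the buffer is updated with the current frame.
 * - Otherwise a 2-bit value selects no update, the previous frame, or the
 *   other of golden/altref as the source.
 */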
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
{
    VP56RangeCoder *c = &s->c;

    if (update)
        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
    case 1:
        return VP56_FRAME_PREVIOUS;
    case 2:
        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
    }
    return VP56_FRAME_NONE;
}

static void update_refs(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;

    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
}

static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, i, j, k, l, m, ret;
    int width  = s->avctx->width;
    int height = s->avctx->height;

    s->keyframe  = !(buf[0] & 1);
    s->profile   =  (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size  = AV_RL24(buf) >> 5;
    buf      += 3;
    buf_size -= 3;

    if (s->profile > 3)
        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

    if (!s->profile)
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
    else    // profiles 1-3 use the bilinear filter
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7*s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe) {
        if (AV_RL24(buf) != 0x2a019d) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        }
        width  = AV_RL16(buf+3) & 0x3fff;
        height = AV_RL16(buf+5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;
        buf      += 7;
        buf_size -= 7;

        if (hscale || vscale)
            av_log_missing_feature(s->avctx, "Upscaling", 1);

        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
        for (i = 0; i < 4; i++)
            for (j = 0; j < 16; j++)
                memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
                       sizeof(s->prob->token[i][j]));
        memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc      , vp8_mv_default_prob     , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
    }

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height) {
        if ((ret = update_dimensions(s, width, height)) < 0)
            return ret;
    }

    ff_vp56_init_range_decoder(c, buf, header_size);
    buf      += header_size;
    buf_size -= header_size;

    if (s->keyframe) {
        if (vp8_rac_get(c))
            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        vp8_rac_get(c); // whether we can skip clamping in dsp functions
    }

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        parse_segment_info(s);
    else
        s->segmentation.update_map = 0;

    s->filter.simple    = vp8_rac_get(c);
    s->filter.level     = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))
        if (vp8_rac_get(c))
            update_lf_deltas(s);

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;
    }

    get_quants(s);

    if (!s->keyframe) {
        update_refs(s);
        s->sign_bias[VP56_FRAME_GOLDEN]               = vp8_rac_get(c);
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
    }

    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];

    s->update_last = s->keyframe || vp8_rac_get(c);

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
                        int prob = vp8_rac_get_uint(c, 8);
                        for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
                            s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
                    }

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->prob->mbskip = vp8_rac_get_uint(c, 8);

    if (!s->keyframe) {
        s->prob->intra  = vp8_rac_get_uint(c, 8);
        s->prob->last   = vp8_rac_get_uint(c, 8);
        s->prob->golden = vp8_rac_get_uint(c, 8);

        if (vp8_rac_get(c))
            for (i = 0; i < 4; i++)
                s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
        if (vp8_rac_get(c))
            for (i = 0; i < 3; i++)
                s->prob->pred8x8c[i]  = vp8_rac_get_uint(c, 8);

        // MV probability updates
        for (i = 0; i < 2; i++)
            for (j = 0; j < 19; j++)
                if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
                    s->prob->mvc[i][j] = vp8_rac_get_nn(c);
    }

    return 0;
}

static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
{
    dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
    dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
}
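
/**
 * Motion vector coding, 17.1.
 */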
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
{
    int bit, x = 0;

    if (vp56_rac_get_prob_branchy(c, p[0])) {
        int i;

        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = 9; i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
            x += 8;
    } else {
        // small_mvtree
        const uint8_t *ps = p+2;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + 3*bit;
        x  += 4*bit;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + bit;
        x  += 2*bit;
        x  += vp56_rac_get_prob(c, *ps);
    }

    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
}

static av_always_inline
const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
{
    if (left == top)
        return vp8_submv_prob[4-!!left];
    if (!top)
        return vp8_submv_prob[2];
    return vp8_submv_prob[1-!!left];
}
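
/**
 * Split motion vector prediction, 16.4.
 * @returns the number of motion vectors parsed (2, 4 or 16)
 */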
static av_always_inline
int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb)
{
    int part_idx;
    int n, num;
    VP8Macroblock *top_mb  = &mb[2];
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
                  *mbsplits_top = vp8_mbsplits[top_mb->partitioning],
                  *mbsplits_cur, *firstidx;
    VP56mv *top_mv  = top_mb->bmv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv  = mb->bmv;

    if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
        if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
            part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
        } else {
            part_idx = VP8_SPLITMVMODE_8x8;
        }
    } else {
        part_idx = VP8_SPLITMVMODE_4x4;
    }

    num = vp8_mbsplit_count[part_idx];
    mbsplits_cur = vp8_mbsplits[part_idx],
    firstidx = vp8_mbfirstidx[part_idx];
    mb->partitioning = part_idx;

    for (n = 0; n < num; n++) {
        int k = firstidx[n];
        uint32_t left, above;
        const uint8_t *submv_prob;

        if (!(k & 3))
            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
        else
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
        if (k <= 3)
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
        else
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above);

        if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
            if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
                if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
                    mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
                    mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
                } else {
                    AV_ZERO32(&mb->bmv[n]);
                }
            } else {
                AV_WN32A(&mb->bmv[n], above);
            }
        } else {
            AV_WN32A(&mb->bmv[n], left);
        }
    }

    return num;
}
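
/* Decode the macroblock-level motion vector (16.2/16.3): gather the
 * neighbouring MVs, choose between zero/nearest/near or read a new MV, and
 * handle split-MV macroblocks via decode_splitmvs(). */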
static av_always_inline
void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y)
{
    VP8Macroblock *mb_edge[3] = { mb + 2 /* top */,
                                  mb - 1 /* left */,
                                  mb + 1 /* top-left */ };
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    enum { EDGE_TOP, EDGE_LEFT, EDGE_TOPLEFT };
    int idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int *sign_bias = s->sign_bias;
    VP56mv near_mv[4];
    uint8_t cnt[4] = { 0 };
    VP56RangeCoder *c = &s->c;

    AV_ZERO32(&near_mv[0]);
    AV_ZERO32(&near_mv[1]);

    /* Process MB on top, left and top-left */
    #define MV_EDGE_CHECK(n)\
    {\
        VP8Macroblock *edge = mb_edge[n];\
        int edge_ref = edge->ref_frame;\
        if (edge_ref != VP56_FRAME_CURRENT) {\
            uint32_t mv = AV_RN32A(&edge->mv);\
            if (mv) {\
                if (cur_sign_bias != sign_bias[edge_ref]) {\
                    /* SWAR negate of the cached MVs */\
                    mv = ~mv;\
                    mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
                }\
                if (!n || mv != AV_RN32A(&near_mv[idx]))\
                    AV_WN32A(&near_mv[++idx], mv);\
                cnt[idx]      += 1 + (n != 2);\
            } else\
                cnt[CNT_ZERO] += 1 + (n != 2);\
        }\
    }

    MV_EDGE_CHECK(0)
    MV_EDGE_CHECK(1)
    MV_EDGE_CHECK(2)

    mb->partitioning = VP8_SPLITMVMODE_NONE;
    if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
        mb->mode = VP8_MVMODE_MV;

        /* If we have three distinct MVs, merge first and last if they're the same */
        if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1+EDGE_TOP]) == AV_RN32A(&near_mv[1+EDGE_TOPLEFT]))
            cnt[CNT_NEAREST] += 1;

        /* Swap near and nearest if necessary */
        if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
            FFSWAP(uint8_t,     cnt[CNT_NEAREST],     cnt[CNT_NEAR]);
            FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
        }

        if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
            if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {

                /* Choose the best mv out of 0,0 and the nearest mv */
                clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
                cnt[CNT_SPLITMV] = ((mb_edge[EDGE_LEFT]->mode    == VP8_MVMODE_SPLIT) +
                                    (mb_edge[EDGE_TOP]->mode     == VP8_MVMODE_SPLIT)) * 2 +
                                    (mb_edge[EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);

                if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
                    mb->mode = VP8_MVMODE_SPLIT;
                    mb->mv = mb->bmv[decode_splitmvs(s, c, mb) - 1];
                } else {
                    mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
                    mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
                    mb->bmv[0] = mb->mv;
                }
            } else {
                clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
                mb->bmv[0] = mb->mv;
            }
        } else {
            clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
            mb->bmv[0] = mb->mv;
        }
    } else {
        mb->mode = VP8_MVMODE_ZERO;
        AV_ZERO32(&mb->mv);
        mb->bmv[0] = mb->mv;
    }
}
00555
00556 static av_always_inline
00557 void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c,
00558 int mb_x, int keyframe)
00559 {
00560 uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
00561 if (keyframe) {
00562 int x, y;
00563 uint8_t* const top = s->intra4x4_pred_mode_top + 4 * mb_x;
00564 uint8_t* const left = s->intra4x4_pred_mode_left;
00565 for (y = 0; y < 4; y++) {
00566 for (x = 0; x < 4; x++) {
00567 const uint8_t *ctx;
00568 ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
00569 *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
00570 left[y] = top[x] = *intra4x4;
00571 intra4x4++;
00572 }
00573 }
00574 } else {
00575 int i;
00576 for (i = 0; i < 16; i++)
00577 intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
00578 }
00579 }
00580
00581 static av_always_inline
00582 void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, uint8_t *segment)
00583 {
00584 VP56RangeCoder *c = &s->c;
00585
00586 if (s->segmentation.update_map)
00587 *segment = vp8_rac_get_tree(c, vp8_segmentid_tree, s->prob->segmentid);
00588 s->segment = *segment;
00589
00590 mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;
00591
00592 if (s->keyframe) {
00593 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);
00594
00595 if (mb->mode == MODE_I4x4) {
00596 decode_intra4x4_modes(s, c, mb_x, 1);
00597 } else {
00598 const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
00599 AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
00600 AV_WN32A(s->intra4x4_pred_mode_left, modes);
00601 }
00602
00603 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
00604 mb->ref_frame = VP56_FRAME_CURRENT;
00605 } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
00606
00607 if (vp56_rac_get_prob_branchy(c, s->prob->last))
00608 mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
00609 VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
00610 else
00611 mb->ref_frame = VP56_FRAME_PREVIOUS;
00612 s->ref_count[mb->ref_frame-1]++;
00613
00614
00615 decode_mvs(s, mb, mb_x, mb_y);
00616 } else {
00617
00618 mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);
00619
00620 if (mb->mode == MODE_I4x4)
00621 decode_intra4x4_modes(s, c, mb_x, 0);
00622
00623 s->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
00624 mb->ref_frame = VP56_FRAME_CURRENT;
00625 mb->partitioning = VP8_SPLITMVMODE_NONE;
00626 AV_ZERO32(&mb->bmv[0]);
00627 }
00628 }
00629
00630 #ifndef decode_block_coeffs_internal
00631
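
/**
 * Decode the DCT coefficients of one block (13.2-13.3).
 * @param i          starting coefficient index (0, or 1 when a separate DC block is coded)
 * @param token_prob probabilities for the first token, pre-selected from the zero-neighbour context
 * @param qmul       DC/AC dequantization factors at positions 0/1
 * @return index of the last decoded coefficient plus one
 */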
static int decode_block_coeffs_internal(VP56RangeCoder *c, DCTELEM block[16],
                                        uint8_t probs[8][3][NUM_DCT_TOKENS-1],
                                        int i, uint8_t *token_prob, int16_t qmul[2])
{
    goto skip_eob;
    do {
        int coeff;
        if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
            return i;

skip_eob:
        if (!vp56_rac_get_prob_branchy(c, token_prob[1])) { // DCT_0
            if (++i == 16)
                return i; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];
            goto skip_eob;
        }

        if (!vp56_rac_get_prob_branchy(c, token_prob[2])) { // DCT_1
            coeff = 1;
            token_prob = probs[i+1][1];
        } else {
            if (!vp56_rac_get_prob_branchy(c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob_branchy(c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(c, token_prob[7])) { // DCT_CAT1
                        coeff  = 5 + vp56_rac_get_prob(c, vp8_dct_cat1_prob[0]);
                    } else {                                            // DCT_CAT2
                        coeff  = 7;
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(c, vp8_dct_cat2_prob[1]);
                    }
                } else {    // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(c, token_prob[8]);
                    int b = vp56_rac_get_prob(c, token_prob[9+a]);
                    int cat = (a<<1) + b;
                    coeff  = 3 + (8<<cat);
                    coeff += vp8_rac_get_coeff(c, ff_vp8_dct_cat_prob[cat]);
                }
            }
            token_prob = probs[i+1][2];
        }
        block[zigzag_scan[i]] = (vp8_rac_get(c) ? -coeff : coeff) * qmul[!!i];
    } while (++i < 16);

    return i;
}
#endif

static av_always_inline
int decode_block_coeffs(VP56RangeCoder *c, DCTELEM block[16],
                        uint8_t probs[8][3][NUM_DCT_TOKENS-1],
                        int i, int zero_nhood, int16_t qmul[2])
{
    uint8_t *token_prob = probs[i][zero_nhood];
    if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
        return 0;
    return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul);
}

static av_always_inline
void decode_mb_coeffs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
                      uint8_t t_nnz[9], uint8_t l_nnz[9])
{
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = s->segment;
    int block_dc = 0;

    if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, s->block_dc, s->prob->token[1], 0, nnz_pred,
                                  s->qmat[segment].luma_dc_qmul);
        l_nnz[8] = t_nnz[8] = !!nnz;
        if (nnz) {
            nnz_total += nnz;
            block_dc = 1;
            if (nnz == 1)
                s->vp8dsp.vp8_luma_dc_wht_dc(s->block, s->block_dc);
            else
                s->vp8dsp.vp8_luma_dc_wht(s->block, s->block_dc);
        }
        luma_start = 1;
        luma_ctx = 0;
    }

    // luma blocks
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, s->block[y][x], s->prob->token[luma_ctx], luma_start,
                                      nnz_pred, s->qmat[segment].luma_qmul);
            s->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // chroma blocks
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
                nnz = decode_block_coeffs(c, s->block[i][(y<<1)+x], s->prob->token[2], 0,
                                          nnz_pred, s->qmat[segment].chroma_qmul);
                s->non_zero_count_cache[i][(y<<1)+x] = nnz;
                t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
                nnz_total += nnz;
            }

    // if no coefficients were actually coded, treat the macroblock as skipped
    if (!nnz_total)
        mb->skip = 1;
}

static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                      int linesize, int uvlinesize, int simple)
{
    AV_COPY128(top_border, src_y + 15*linesize);
    if (!simple) {
        AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
        AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
    }
}

static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                    int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
                    int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border-32;     // for TL prediction
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a,b,xchg) do {        \
        if (xchg) AV_SWAP64(b,a);  \
        else      AV_COPY64(b,a);  \
    } while (0)

    XCHG(top_border_m1+8, src_y-8, xchg);
    XCHG(top_border,      src_y,   xchg);
    XCHG(top_border+8,    src_y+8, 1);
    if (mb_x < mb_width-1)
        XCHG(top_border+32, src_y+16, 1);

    // only copy chroma for the normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1+16, src_cb-8, xchg);
        XCHG(top_border_m1+24, src_cr-8, xchg);
        XCHG(top_border+16, src_cb, 1);
        XCHG(top_border+24, src_cr, 1);
    }
}

static av_always_inline
int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x) {
        return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
    } else {
        return mb_y ? mode : LEFT_DC_PRED8x8;
    }
}

static av_always_inline
int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x) {
        return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
    } else {
        return mb_y ? mode : HOR_PRED8x8;
    }
}

static av_always_inline
int check_intra_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (mode == DC_PRED8x8) {
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    } else {
        return mode;
    }
}

static av_always_inline
int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
{
    switch (mode) {
    case DC_PRED8x8:
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    case VERT_PRED8x8:
        return !mb_y ? DC_127_PRED8x8 : mode;
    case HOR_PRED8x8:
        return !mb_x ? DC_129_PRED8x8 : mode;
    case PLANE_PRED8x8 /*TM*/:
        return check_tm_pred8x8_mode(mode, mb_x, mb_y);
    }
    return mode;
}

static av_always_inline
int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x) {
        return mb_y ? VERT_VP8_PRED : DC_129_PRED;
    } else {
        return mb_y ? mode : HOR_VP8_PRED;
    }
}

static av_always_inline
int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
{
    switch (mode) {
    case VERT_PRED:
        if (!mb_x && mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case DIAG_DOWN_LEFT_PRED:
    case VERT_LEFT_PRED:
        return !mb_y ? DC_127_PRED : mode;
    case HOR_PRED:
        if (!mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case HOR_UP_PRED:
        return !mb_x ? DC_129_PRED : mode;
    case TM_VP8_PRED:
        return check_tm_pred4x4_mode(mode, mb_x, mb_y);
    case DC_PRED:
    case DIAG_DOWN_RIGHT_PRED:
    case VERT_RIGHT_PRED:
    case HOR_DOWN_PRED:
        if (!mb_y || !mb_x)
            *copy_buf = 1;
        return mode;
    }
    return mode;
}

static av_always_inline
void intra_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   int mb_x, int mb_y)
{
    AVCodecContext *avctx = s->avctx;
    int x, y, mode, nnz, tr;

    // Intra prediction must use the unfiltered pixels of the row above, which
    // were saved in top_border before deblocking; swap them in here and back
    // out after prediction. On the first row this initializes the top edge.
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
            mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y);
        } else {
            mode = check_intra_pred8x8_mode(mb->mode, mb_x, mb_y);
        }
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = s->intra4x4_pred_mode_mb;
        uint8_t tr_top[4] = { 127, 127, 127, 127 };

        // all blocks on the right edge of the macroblock use the bottom edge
        // of the top macroblock for the top-right prediction
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
            mb_x == s->mb_width-1) {
            tr = tr_right[-1]*0x01010101;
            tr_right = (uint8_t *)&tr;
        }

        if (mb->skip)
            AV_ZERO128(s->non_zero_count_cache);

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                int copy = 0, linesize = s->linesize;
                uint8_t *dst = ptr+4*x;
                DECLARE_ALIGNED(4, uint8_t, copy_dst)[5*8];

                if ((y == 0 || x == 3) && mb_y == 0 && avctx->flags & CODEC_FLAG_EMU_EDGE) {
                    topright = tr_top;
                } else if (x == 3)
                    topright = tr_right;

                if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
                    mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, &copy);
                    if (copy) {
                        dst = copy_dst + 12;
                        linesize = 8;
                        if (!(mb_y + y)) {
                            copy_dst[3] = 127U;
                            AV_WN32A(copy_dst+4, 127U * 0x01010101U);
                        } else {
                            AV_COPY32(copy_dst+4, ptr+4*x-s->linesize);
                            if (!(mb_x + x)) {
                                copy_dst[3] = 129U;
                            } else {
                                copy_dst[3] = ptr[4*x-s->linesize-1];
                            }
                        }
                        if (!(mb_x + x)) {
                            copy_dst[11] =
                            copy_dst[19] =
                            copy_dst[27] =
                            copy_dst[35] = 129U;
                        } else {
                            copy_dst[11] = ptr[4*x              -1];
                            copy_dst[19] = ptr[4*x+s->linesize  -1];
                            copy_dst[27] = ptr[4*x+s->linesize*2-1];
                            copy_dst[35] = ptr[4*x+s->linesize*3-1];
                        }
                    }
                } else {
                    mode = intra4x4[x];
                }
                s->hpc.pred4x4[mode](dst, topright, linesize);
                if (copy) {
                    AV_COPY32(ptr+4*x              , copy_dst+12);
                    AV_COPY32(ptr+4*x+s->linesize  , copy_dst+20);
                    AV_COPY32(ptr+4*x+s->linesize*2, copy_dst+28);
                    AV_COPY32(ptr+4*x+s->linesize*3, copy_dst+36);
                }

                nnz = s->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr+4*x, s->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr+4*x, s->block[y][x], s->linesize);
                }
                topright += 4;
            }

            ptr += 4*s->linesize;
            intra4x4 += 4;
        }
    }

    if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
        mode = check_intra_pred8x8_mode_emuedge(s->chroma_pred_mode, mb_x, mb_y);
    } else {
        mode = check_intra_pred8x8_mode(s->chroma_pred_mode, mb_x, mb_y);
    }
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y))
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}

static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // i.e. how many extra pixels are needed
                                // for the horizontal filter to work
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
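
/**
 * Luma MC function.
 *
 * @param s        VP8 decoding context
 * @param dst      target buffer for block data at block position
 * @param src      reference picture buffer at origin (0, 0)
 * @param mv       motion vector (relative to block position) to get pixel data from
 * @param x_off    horizontal position of block from origin (0, 0)
 * @param y_off    vertical position of block from origin (0, 0)
 * @param block_w  width of block (16, 8 or 4)
 * @param block_h  height of block (always same as block_w)
 * @param width    width of src/dst plane data
 * @param height   height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func  motion compensation function pointers (bilinear or sixtap MC)
 */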
static av_always_inline
void vp8_mc_luma(VP8Context *s, uint8_t *dst, uint8_t *src, const VP56mv *mv,
                 int x_off, int y_off, int block_w, int block_h,
                 int width, int height, int linesize,
                 vp8_mc_func mc_func[3][3])
{
    if (AV_RN32A(mv)) {
        int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
        int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 2;
        y_off += mv->y >> 2;

        // edge emulation
        src += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src = s->edge_emu_buffer + mx_idx + linesize * my_idx;
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
    } else
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
}

static av_always_inline
void vp8_mc_chroma(VP8Context *s, uint8_t *dst1, uint8_t *dst2, uint8_t *src1,
                   uint8_t *src2, const VP56mv *mv, int x_off, int y_off,
                   int block_w, int block_h, int width, int height, int linesize,
                   vp8_mc_func mc_func[3][3])
{
    if (AV_RN32A(mv)) {
        int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
        int my = mv->y&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 3;
        y_off += mv->y >> 3;

        // edge emulation
        src1 += y_off * linesize + x_off;
        src2 += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src1 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);

            s->dsp.emulated_edge_mc(s->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
                                    block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                    x_off - mx_idx, y_off - my_idx, width, height);
            src2 = s->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        } else {
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        }
    } else {
        mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
        mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}

static av_always_inline
void vp8_mc_part(VP8Context *s, uint8_t *dst[3],
                 AVFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off,
                 int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc_luma(s, dst[0] + by_off * s->linesize + bx_off,
                ref_frame->data[0], mv, x_off + bx_off, y_off + by_off,
                block_w, block_h, width, height, s->linesize,
                s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        uvmv.x &= ~7;
        uvmv.y &= ~7;
    }
    x_off   >>= 1; y_off   >>= 1;
    bx_off  >>= 1; by_off  >>= 1;
    width   >>= 1; height  >>= 1;
    block_w >>= 1; block_h >>= 1;
    vp8_mc_chroma(s, dst[1] + by_off * s->uvlinesize + bx_off,
                  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame->data[1],
                  ref_frame->data[2], &uvmv, x_off + bx_off, y_off + by_off,
                  block_w, block_h, width, height, s->uvlinesize,
                  s->put_pixels_tab[1 + (block_w == 4)]);
}

/* Prefetch reference pixels a few macroblocks ahead of the current position. */
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
{
    /* Don't prefetch refs that haven't been used very often this frame. */
    if (s->ref_count[ref-1] > (mb_xy >> 5)) {
        int x_off = mb_x << 4, y_off = mb_y << 4;
        int mx = (mb->mv.x>>2) + x_off + 8;
        int my = (mb->mv.y>>2) + y_off;
        uint8_t **src= s->framep[ref]->data;
        int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
        s->dsp.prefetch(src[0]+off, s->linesize, 4);
        off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
        s->dsp.prefetch(src[1]+off, src[2]-src[1], 2);
    }
}
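
/**
 * Apply motion vectors to prediction buffer, chapter 18.
 */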
static av_always_inline
void inter_predict(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb,
                   int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16*s->mb_width, height = 16*s->mb_height;
    AVFrame *ref = s->framep[mb->ref_frame];
    VP56mv *bmv = mb->bmv;

    switch (mb->partitioning) {
    case VP8_SPLITMVMODE_NONE:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
        break;
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc_luma(s, dst[0] + 4*y*s->linesize + x*4,
                            ref->data[0], &bmv[4*y + x],
                            4*x + x_off, 4*y + y_off, 4, 4,
                            width, height, s->linesize,
                            s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                uvmv.x = mb->bmv[ 2*y    * 4 + 2*x  ].x +
                         mb->bmv[ 2*y    * 4 + 2*x+1].x +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].x +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].x;
                uvmv.y = mb->bmv[ 2*y    * 4 + 2*x  ].y +
                         mb->bmv[ 2*y    * 4 + 2*x+1].y +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].y +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].y;
                uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
                uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
                if (s->profile == 3) {
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc_chroma(s, dst[1] + 4*y*s->uvlinesize + x*4,
                              dst[2] + 4*y*s->uvlinesize + x*4,
                              ref->data[1], ref->data[2], &uvmv,
                              4*x + x_off, 4*y + y_off, 4, 4,
                              width, height, s->uvlinesize,
                              s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}

static av_always_inline void idct_mb(VP8Context *s, uint8_t *dst[3], VP8Macroblock *mb)
{
    int x, y, ch;

    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[y]);
            if (nnz4) {
                if (nnz4&~0x01010101) {
                    for (x = 0; x < 4; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, s->block[y][x], s->linesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(y_dst+4*x, s->block[y][x], s->linesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                } else {
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, s->block[y], s->linesize);
                }
            }
            y_dst += 4*s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RL32(s->non_zero_count_cache[4+ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1+ch];
            if (nnz4&~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(ch_dst+4*x, s->block[4+ch][(y<<1)+x], s->uvlinesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                    ch_dst += 4*s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, s->block[4+ch], s->uvlinesize);
            }
        }
    }
}

static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[s->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];
        filter_level += s->lf_delta.mode[mb->mode];
    }

    /* clamp to [0, max], where max is a power of two minus one (here 63) */
#define POW2CLIP(x,max) (((x) & ~max) ? (-(x))>>31 & max : (x));
    filter_level = POW2CLIP(filter_level, 63);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= (s->filter.sharpness + 3) >> 2;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit = interior_limit;
    f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
}

static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;
    int uvlinesize = s->uvlinesize;
    static const uint8_t hev_thresh_lut[2][64] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
          3, 3, 3, 3 },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          2, 2, 2, 2 }
    };

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    hev_thresh = hev_thresh_lut[s->keyframe][filter_level];

    if (mb_x) {
        s->vp8dsp.vp8_h_loop_filter16y(dst[0],     linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1],     dst[2],      uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
                                             uvlinesize,  bedge_lim,
                                             inner_limit, hev_thresh);
    }

    if (mb_y) {
        s->vp8dsp.vp8_v_loop_filter16y(dst[0],     linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1],     dst[2],      uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
                                             linesize,    bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
                                             linesize,    bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
                                             linesize,    bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize,  bedge_lim,
                                             inner_limit, hev_thresh);
    }
}

static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim;
    int filter_level = f->filter_level;
    int inner_limit = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize = s->linesize;

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    if (mb_x)
        s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
    }

    if (mb_y)
        s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
    }
}

static void filter_mb_row(VP8Context *s, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst[3] = {
        s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize,
        s->framep[VP56_FRAME_CURRENT]->data[1] +  8*mb_y*s->uvlinesize,
        s->framep[VP56_FRAME_CURRENT]->data[2] +  8*mb_y*s->uvlinesize
    };
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        filter_mb(s, dst, f++, mb_x, mb_y);
        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;
    }
}

static void filter_mb_row_simple(VP8Context *s, int mb_y)
{
    VP8FilterStrength *f = s->filter_strength;
    uint8_t *dst = s->framep[VP56_FRAME_CURRENT]->data[0] + 16*mb_y*s->linesize;
    int mb_x;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
        backup_mb_border(s->top_border[mb_x+1], dst, NULL, NULL, s->linesize, 0, 1);
        filter_mb_simple(s, dst, f++, mb_x, mb_y);
        dst += 16;
    }
}
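
/* Top-level frame decoding: parse the frame header, pick a buffer for the
 * current frame, decode every macroblock row (mode/MV parsing, prediction,
 * IDCT and loop filtering), then rotate the reference frames. */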
static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                            AVPacket *avpkt)
{
    VP8Context *s = avctx->priv_data;
    int ret, mb_x, mb_y, i, y, referenced;
    enum AVDiscard skip_thresh;
    AVFrame *av_uninit(curframe);

    if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
        return ret;

    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                || s->update_altref == VP56_FRAME_CURRENT;

    skip_thresh = !referenced ? AVDISCARD_NONREF :
                    !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {
        s->invisible = 1;
        goto skip_decode;
    }
    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    // find a free buffer for the current frame
    for (i = 0; i < 4; i++)
        if (&s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
            break;
        }
    if (curframe->data[0])
        avctx->release_buffer(avctx, curframe);

    curframe->key_frame = s->keyframe;
    curframe->pict_type = s->keyframe ? FF_I_TYPE : FF_P_TYPE;
    curframe->reference = referenced ? 3 : 0;
    if ((ret = avctx->get_buffer(avctx, curframe))) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed!\n");
        return ret;
    }

    // The arithmetic probabilities are updated every frame, so if decoding
    // didn't start on a keyframe the current state is almost certainly junk;
    // refuse to output anything rather than display garbage.
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
        return AVERROR_INVALIDDATA;
    }

    s->linesize   = curframe->linesize[0];
    s->uvlinesize = curframe->linesize[1];

    if (!s->edge_emu_buffer)
        s->edge_emu_buffer = av_malloc(21*s->linesize);

    memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));

    /* Zero macroblock structures for top/top-left prediction from outside the frame. */
    memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));

    // top edge of 127 for intra prediction
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        s->top_border[0][15] = s->top_border[0][23] = 127;
        memset(s->top_border[1]-1, 127, s->mb_width*sizeof(*s->top_border)+1);
    }
    memset(s->ref_count, 0, sizeof(s->ref_count));
    if (s->keyframe)
        memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);

#define MARGIN (16 << 2)
    s->mv_min.y = -MARGIN;
    s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;

    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
        VP8Macroblock *mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
        int mb_xy = mb_y*s->mb_width;
        uint8_t *dst[3] = {
            curframe->data[0] + 16*mb_y*s->linesize,
            curframe->data[1] +  8*mb_y*s->uvlinesize,
            curframe->data[2] +  8*mb_y*s->uvlinesize
        };

        memset(mb - 1, 0, sizeof(*mb));   // zero left macroblock
        memset(s->left_nnz, 0, sizeof(s->left_nnz));
        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);

        // left edge of 129 for intra prediction
        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
            for (i = 0; i < 3; i++)
                for (y = 0; y < 16>>!!i; y++)
                    dst[i][y*curframe->linesize[i]-1] = 129;
            if (mb_y == 1) // top left edge is also 129
                s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
        }

        s->mv_min.x = -MARGIN;
        s->mv_max.x = ((s->mb_width  - 1) << 6) + MARGIN;

        for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
            /* Prefetch the current frame, 4 MBs ahead */
            s->dsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
            s->dsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);

            decode_mb_mode(s, mb, mb_x, mb_y, s->segmentation_map + mb_xy);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

            if (!mb->skip)
                decode_mb_coeffs(s, c, mb, s->top_nnz[mb_x], s->left_nnz);

            if (mb->mode <= MODE_I4x4)
                intra_predict(s, dst, mb, mb_x, mb_y);
            else
                inter_predict(s, dst, mb, mb_x, mb_y);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

            if (!mb->skip) {
                idct_mb(s, dst, mb);
            } else {
                AV_ZERO64(s->left_nnz);
                AV_WN64(s->top_nnz[mb_x], 0);   // array of 9, so unaligned

                // Reset DC block predictors if they would exist if the mb had coefficients
                if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                    s->left_nnz[8]      = 0;
                    s->top_nnz[mb_x][8] = 0;
                }
            }

            if (s->deblock_filter)
                filter_level_for_mb(s, mb, &s->filter_strength[mb_x]);

            prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

            dst[0] += 16;
            dst[1] += 8;
            dst[2] += 8;
            s->mv_min.x -= 64;
            s->mv_max.x -= 64;
        }
        if (s->deblock_filter) {
            if (s->filter.simple)
                filter_mb_row_simple(s, mb_y);
            else
                filter_mb_row(s, mb_y);
        }
        s->mv_min.y -= 64;
        s->mv_max.y -= 64;
    }

skip_decode:
    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    // check if golden and altref were swapped
    if (s->update_altref == VP56_FRAME_GOLDEN &&
        s->update_golden == VP56_FRAME_GOLDEN2)
        FFSWAP(AVFrame *, s->framep[VP56_FRAME_GOLDEN], s->framep[VP56_FRAME_GOLDEN2]);
    else {
        if (s->update_altref != VP56_FRAME_NONE)
            s->framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];

        if (s->update_golden != VP56_FRAME_NONE)
            s->framep[VP56_FRAME_GOLDEN]  = s->framep[s->update_golden];
    }

    if (s->update_last) // move cur->prev
        s->framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_CURRENT];

    // release no longer referenced frames
    for (i = 0; i < 4; i++)
        if (s->frames[i].data[0] &&
            &s->frames[i] != s->framep[VP56_FRAME_CURRENT] &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            avctx->release_buffer(avctx, &s->frames[i]);

    if (!s->invisible) {
        *(AVFrame*)data = *s->framep[VP56_FRAME_CURRENT];
        *data_size = sizeof(AVFrame);
    }

    return avpkt->size;
}

static av_cold int vp8_decode_init(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;

    s->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_YUV420P;

    dsputil_init(&s->dsp, avctx);
    ff_h264_pred_init(&s->hpc, CODEC_ID_VP8);
    ff_vp8dsp_init(&s->vp8dsp);

    return 0;
}

static av_cold int vp8_decode_free(AVCodecContext *avctx)
{
    vp8_decode_flush(avctx);
    return 0;
}

AVCodec ff_vp8_decoder = {
    "vp8",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_VP8,
    sizeof(VP8Context),
    vp8_decode_init,
    NULL,
    vp8_decode_free,
    vp8_decode_frame,
    CODEC_CAP_DR1,
    .flush = vp8_decode_flush,
    .long_name = NULL_IF_CONFIG_SMALL("On2 VP8"),
};