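/*
 * Intel Indeo 3 video decoder
 *
 * Frames are reconstructed into internal planar YUV 4:1:0 buffers and then
 * copied into the output AVFrame.
 */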
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "dsputil.h"
#include "bytestream.h"

#include "indeo3data.h"

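/*
 * One set of planar YUV buffers. Ybuf, Ubuf and Vbuf point into the single
 * allocation made in iv_alloc_frames(); y_w/y_h and uv_w/uv_h hold the padded
 * luma and chroma plane dimensions.
 */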
typedef struct
{
    uint8_t *Ybuf;
    uint8_t *Ubuf;
    uint8_t *Vbuf;
    unsigned short y_w, y_h;
    unsigned short uv_w, uv_h;
} YUVBufs;

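/*
 * Decoder context: the two internal frame buffers (current and reference),
 * the backing allocation for them, and the lookup tables built at init time.
 */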
typedef struct Indeo3DecodeContext {
    AVCodecContext *avctx;
    int width, height;
    AVFrame frame;

    uint8_t *buf;
    YUVBufs iv_frame[2];
    YUVBufs *cur_frame;
    YUVBufs *ref_frame;

    uint8_t *ModPred;
    uint8_t *corrector_type;
} Indeo3DecodeContext;

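/*
 * Parameters for building the corrector_type classification table in
 * build_modpred(): corrector_type_0 holds the per-table threshold below which
 * a code byte is classified as a plain corrector, and corrector_type_2 maps
 * the byte values 248..255 to the special codes (2..9) handled by the switch
 * statements in iv_Decode_Chunk().
 */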
static const uint8_t corrector_type_0[24] = {
    195, 159, 133, 115, 101, 93, 87, 77,
    195, 159, 133, 115, 101, 93, 87, 77,
    128, 79, 79, 79, 79, 79, 79, 79
};

static const uint8_t corrector_type_2[8] = { 9, 7, 6, 8, 5, 4, 3, 2 };

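/*
 * Allocate and fill the two lookup tables used during block decoding:
 * ModPred (8 remapping tables of 128 entries each, applied to the reference
 * data for certain block modes) and corrector_type (24 tables of 256 entries
 * classifying each code byte read from the bitstream).
 */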
static av_cold int build_modpred(Indeo3DecodeContext *s)
{
    int i, j;

    if (!(s->ModPred = av_malloc(8 * 128)))
        return AVERROR(ENOMEM);

    for (i=0; i < 128; ++i) {
        s->ModPred[i+0*128] = i > 126 ? 254 : 2*(i + 1 - ((i + 1) % 2));
        s->ModPred[i+1*128] = i == 7   ? 20 :
                              i == 119 ||
                              i == 120 ? 236 : 2*(i + 2 - ((i + 1) % 3));
        s->ModPred[i+2*128] = i > 125 ? 248 : 2*(i + 2 - ((i + 2) % 4));
        s->ModPred[i+3*128] =                 2*(i + 1 - ((i - 3) % 5));
        s->ModPred[i+4*128] = i == 8  ? 20  : 2*(i + 1 - ((i - 3) % 6));
        s->ModPred[i+5*128] =                 2*(i + 4 - ((i + 3) % 7));
        s->ModPred[i+6*128] = i > 123 ? 240 : 2*(i + 4 - ((i + 4) % 8));
        s->ModPred[i+7*128] =                 2*(i + 5 - ((i + 4) % 9));
    }

    if (!(s->corrector_type = av_malloc(24 * 256)))
        return AVERROR(ENOMEM);

    for (i=0; i < 24; ++i) {
        for (j=0; j < 256; ++j) {
            s->corrector_type[i*256+j] = j < corrector_type_0[i]          ? 1 :
                                         j < 248 || (i == 16 && j == 248) ? 0 :
                                         corrector_type_2[j - 248];
        }
    }

    return 0;
}

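/*
 * Allocate one backing buffer holding both internal frames (two luma planes
 * and four chroma planes), with one padding row placed in front of each
 * plane. The padding rows are filled with 0x80, which serves as neutral
 * neighbouring data for blocks at the top edge of a plane.
 */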
static av_cold int iv_alloc_frames(Indeo3DecodeContext *s)
{
    int luma_width    = (s->width  + 3) & ~3,
        luma_height   = (s->height + 3) & ~3,
        chroma_width  = ((luma_width  >> 2) + 3) & ~3,
        chroma_height = ((luma_height >> 2) + 3) & ~3,
        luma_pixels   = luma_width * luma_height,
        chroma_pixels = chroma_width * chroma_height,
        i;
    unsigned int bufsize = luma_pixels * 2 + luma_width * 3 +
                           (chroma_pixels + chroma_width) * 4;

    av_freep(&s->buf);
    if(!(s->buf = av_malloc(bufsize)))
        return AVERROR(ENOMEM);
    s->iv_frame[0].y_w  = s->iv_frame[1].y_w  = luma_width;
    s->iv_frame[0].y_h  = s->iv_frame[1].y_h  = luma_height;
    s->iv_frame[0].uv_w = s->iv_frame[1].uv_w = chroma_width;
    s->iv_frame[0].uv_h = s->iv_frame[1].uv_h = chroma_height;

    /* Y (luma) planes */
    s->iv_frame[0].Ybuf = s->buf + luma_width;
    i = luma_pixels + luma_width * 2;
    s->iv_frame[1].Ybuf = s->buf + i;
    i += (luma_pixels + luma_width);
    /* U (chroma) planes */
    s->iv_frame[0].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Ubuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    /* V (chroma) planes */
    s->iv_frame[0].Vbuf = s->buf + i;
    i += (chroma_pixels + chroma_width);
    s->iv_frame[1].Vbuf = s->buf + i;

    /* fill the padding rows in front of the planes with neutral values */
    for(i = 1; i <= luma_width; i++)
        s->iv_frame[0].Ybuf[-i] = s->iv_frame[1].Ybuf[-i] =
            s->iv_frame[0].Ubuf[-i] = 0x80;

    for(i = 1; i <= chroma_width; i++) {
        s->iv_frame[1].Ubuf[-i] = 0x80;
        s->iv_frame[0].Vbuf[-i] = 0x80;
        s->iv_frame[1].Vbuf[-i] = 0x80;
        s->iv_frame[1].Vbuf[chroma_pixels+i-1] = 0x80;
    }

    return 0;
}

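/* Free everything allocated by build_modpred() and iv_alloc_frames(). */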
static av_cold void iv_free_func(Indeo3DecodeContext *s)
{
    av_freep(&s->buf);
    av_freep(&s->ModPred);
    av_freep(&s->corrector_type);
}

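/*
 * Descriptor for one sub-region ("strip") of a plane. iv_Decode_Chunk()
 * keeps a small stack of these while it recursively splits the plane into
 * smaller blocks.
 */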
struct ustr {
    long xpos;
    long ypos;
    long width;
    long height;
    long split_flag;
    long split_direction;
    long usl7;
};

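/*
 * Helper macros shared by the block-decoding loops below. They update the
 * small RLE state (rle_v1/rle_v2/rle_v3) and the per-block line counter lp2,
 * and rewind buf1 where a code byte has to be re-read.
 */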
#define LV1_CHECK(buf1,rle_v3,lv1,lp2) \
    if((lv1 & 0x80) != 0) {            \
        if(rle_v3 != 0)                \
            rle_v3 = 0;                \
        else {                         \
            rle_v3 = 1;                \
            buf1 -= 2;                 \
        }                              \
    }                                  \
    lp2 = 4;


#define RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3) \
    if(rle_v3 == 0) {                           \
        rle_v2 = *buf1;                         \
        rle_v1 = 1;                             \
        if(rle_v2 > 32) {                       \
            rle_v2 -= 32;                       \
            rle_v1 = 0;                         \
        }                                       \
        rle_v3 = 1;                             \
    }                                           \
    buf1--;


#define LP2_CHECK(buf1,rle_v3,lp2) \
    if(lp2 == 0 && rle_v3 != 0)    \
        rle_v3 = 0;                \
    else {                         \
        buf1--;                    \
        rle_v3 = 1;                \
    }


#define RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2) \
    rle_v2--;                                 \
    if(rle_v2 == 0) {                         \
        rle_v3 = 0;                           \
        buf1 += 2;                            \
    }                                         \
    lp2 = 4;

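/*
 * Decode one plane ("chunk") of the frame into cur, using ref as reference.
 *
 * The plane is processed as a stack of strips that are recursively split by
 * 2-bit commands read from buf1: command 0 pushes a new strip with reduced
 * height, command 1 one with reduced width, and commands 2/3, when first seen
 * on a strip, select intra coding (2) or inter coding with a motion vector
 * pair taken from buf2 (3). For coded blocks the high nibble of the next byte
 * selects a block mode (the outer switch on k), and the following code bytes
 * are classified through s->corrector_type and applied as correctors from the
 * tables in indeo3data.h. This summarizes the control flow of the code below
 * rather than the full bitstream semantics.
 */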
static void iv_Decode_Chunk(Indeo3DecodeContext *s,
                            uint8_t *cur, uint8_t *ref, int width, int height,
                            const uint8_t *buf1, long cb_offset, const uint8_t *hdr,
                            const uint8_t *buf2, int min_width_160)
{
    uint8_t bit_buf;
    unsigned long bit_pos, lv, lv1, lv2;
    long *width_tbl, width_tbl_arr[10];
    const signed char *ref_vectors;
    uint8_t *cur_frm_pos, *ref_frm_pos, *cp, *cp2;
    uint32_t *cur_lp, *ref_lp;
    const uint32_t *correction_lp[2], *correctionloworder_lp[2], *correctionhighorder_lp[2];
    uint8_t *correction_type_sp[2];
    struct ustr strip_tbl[20], *strip;
    int i, j, k, lp1, lp2, flag1, cmd, blks_width, blks_height, region_160_width,
        rle_v1, rle_v2, rle_v3;
    unsigned short res;

    bit_buf = 0;
    ref_vectors = NULL;

    /* width_tbl[j] == j * (width / 4); used as a row stride for the 32-bit
     * accesses below */
    width_tbl = width_tbl_arr + 1;
    i = (width < 0 ? width + 3 : width)/4;
    for(j = -1; j < 8; j++)
        width_tbl[j] = i * j;

    strip = strip_tbl;

    for(region_160_width = 0; region_160_width < (width - min_width_160); region_160_width += min_width_160);

    /* the initial strip covers the whole plane */
    strip->ypos = strip->xpos = 0;
    for(strip->width = min_width_160; width > strip->width; strip->width *= 2);
    strip->height = height;
    strip->split_direction = 0;
    strip->split_flag = 0;
    strip->usl7 = 0;

    bit_pos = 0;

    rle_v1 = rle_v2 = rle_v3 = 0;

    while(strip >= strip_tbl) {
        if(bit_pos <= 0) {
            bit_pos = 8;
            bit_buf = *buf1++;
        }

        bit_pos -= 2;
        cmd = (bit_buf >> bit_pos) & 0x03;

        if(cmd == 0) {
            /* push a new strip with reduced height */
            strip++;
            if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
                av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
                break;
            }
            memcpy(strip, strip-1, sizeof(*strip));
            strip->split_flag = 1;
            strip->split_direction = 0;
            strip->height = (strip->height > 8 ? ((strip->height+8)>>4)<<3 : 4);
            continue;
        } else if(cmd == 1) {
            /* push a new strip with reduced width */
            strip++;
            if(strip >= strip_tbl + FF_ARRAY_ELEMS(strip_tbl)) {
                av_log(s->avctx, AV_LOG_WARNING, "out of range strip\n");
                break;
            }
            memcpy(strip, strip-1, sizeof(*strip));
            strip->split_flag = 1;
            strip->split_direction = 1;
            strip->width = (strip->width > 8 ? ((strip->width+8)>>4)<<3 : 4);
            continue;
        } else if(cmd == 2) {
            if(strip->usl7 == 0) {
                strip->usl7 = 1;
                ref_vectors = NULL;
                continue;
            }
        } else if(cmd == 3) {
            if(strip->usl7 == 0) {
                strip->usl7 = 1;
                ref_vectors = (const signed char*)buf2 + (*buf1 * 2);
                buf1++;
                continue;
            }
        }

        cur_frm_pos = cur + width * strip->ypos + strip->xpos;

        if((blks_width = strip->width) < 0)
            blks_width += 3;
        blks_width >>= 2;
        blks_height = strip->height;

        if(ref_vectors != NULL) {
            ref_frm_pos = ref + (ref_vectors[0] + strip->ypos) * width +
                ref_vectors[1] + strip->xpos;
        } else
            ref_frm_pos = cur_frm_pos - width_tbl[4];

        if(cmd == 2) {
            if(bit_pos <= 0) {
                bit_pos = 8;
                bit_buf = *buf1++;
            }

            bit_pos -= 2;
            cmd = (bit_buf >> bit_pos) & 0x03;

            if(cmd == 0 || ref_vectors != NULL) {
                /* plain copy from the reference position */
                for(lp1 = 0; lp1 < blks_width; lp1++) {
                    for(i = 0, j = 0; i < blks_height; i++, j += width_tbl[1])
                        ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
                    cur_frm_pos += 4;
                    ref_frm_pos += 4;
                }
            } else if(cmd != 1)
                return;
        } else {
            /* high nibble: block coding mode; low nibble: correction table
             * index (biased by cb_offset) */
            k = *buf1 >> 4;
            j = *buf1 & 0x0f;
            buf1++;
            lv = j + cb_offset;

            if((lv - 8) <= 7 && (k == 0 || k == 3 || k == 10)) {
                cp2 = s->ModPred + ((lv - 8) << 7);
                cp = ref_frm_pos;
                for(i = 0; i < blks_width << 2; i++) {
                    int v = *cp >> 1;
                    *(cp++) = cp2[v];
                }
            }

            if(k == 1 || k == 4) {
                lv = (hdr[j] & 0xf) + cb_offset;
                correction_type_sp[0] = s->corrector_type + (lv << 8);
                correction_lp[0] = correction + (lv << 8);
                lv = (hdr[j] >> 4) + cb_offset;
                correction_lp[1] = correction + (lv << 8);
                correction_type_sp[1] = s->corrector_type + (lv << 8);
            } else {
                correctionloworder_lp[0] = correctionloworder_lp[1] = correctionloworder + (lv << 8);
                correctionhighorder_lp[0] = correctionhighorder_lp[1] = correctionhighorder + (lv << 8);
                correction_type_sp[0] = correction_type_sp[1] = s->corrector_type + (lv << 8);
                correction_lp[0] = correction_lp[1] = correction + (lv << 8);
            }

            switch(k) {
            case 1:
            case 0:
                for( ; blks_height > 0; blks_height -= 4) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;
                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2];
                            ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2];

                            switch(correction_type_sp[0][k]) {
                            case 0:
                                *cur_lp = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;
                            case 1:
                                res = ((av_le2ne16(((unsigned short *)(ref_lp))[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
                                ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
                                res = ((av_le2ne16(((unsigned short *)(ref_lp))[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
                                ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
                                buf1++;
                                lp2++;
                                break;
                            case 2:
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 2; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 += 2;
                                }
                                break;
                            case 3:
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < (3 - lp2); i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 = 3;
                                }
                                break;
                            case 8:
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    if(rle_v1 == 1 || ref_vectors != NULL) {
                                        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                            cur_lp[j] = ref_lp[j];
                                    }

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v1 = 1;
                                    rle_v2 = *buf1 - 1;
                                }
                            case 5:
                                LP2_CHECK(buf1,rle_v3,lp2)
                            case 4:
                                for(i = 0, j = 0; i < (4 - lp2); i++, j += width_tbl[1])
                                    cur_lp[j] = ref_lp[j];
                                lp2 = 4;
                                break;

                            case 7:
                                if(rle_v3 != 0)
                                    rle_v3 = 0;
                                else {
                                    buf1--;
                                    rle_v3 = 1;
                                }
                            case 6:
                                if(ref_vectors != NULL) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                }
                                lp2 = 4;
                                break;

                            case 9:
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);
                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;

                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;
                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                        ref_frm_pos += 4;
                    }

                    cur_frm_pos += ((width - blks_width) * 4);
                    ref_frm_pos += ((width - blks_width) * 4);
                }
                break;

            case 4:
            case 3:
                if(ref_vectors != NULL)
                    return;
                flag1 = 1;

                for( ; blks_height > 0; blks_height -= 8) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;

                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                            ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];

                            switch(correction_type_sp[lp2 & 0x01][k]) {
                            case 0:
                                cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                                    cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                else
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;

                            case 1:
                                res = ((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + correction_lp[lp2 & 0x01][*buf1]) << 1;
                                ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
                                res = ((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1;
                                ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);

                                if(lp2 > 0 || flag1 == 0 || strip->ypos != 0)
                                    cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                else
                                    cur_lp[0] = cur_lp[width_tbl[1]];
                                buf1++;
                                lp2++;
                                break;

                            case 2:
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = *ref_lp;
                                    lp2 += 2;
                                }
                                break;

                            case 3:
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
                                        cur_lp[j] = *ref_lp;
                                    lp2 = 3;
                                }
                                break;

                            case 6:
                                lp2 = 4;
                                break;

                            case 7:
                                if(rle_v3 != 0)
                                    rle_v3 = 0;
                                else {
                                    buf1--;
                                    rle_v3 = 1;
                                }
                                lp2 = 4;
                                break;

                            case 8:
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    if(rle_v1 == 1) {
                                        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                            cur_lp[j] = ref_lp[j];
                                    }

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v2 = (*buf1) - 1;
                                    rle_v1 = 1;
                                }
                            case 5:
                                LP2_CHECK(buf1,rle_v3,lp2)
                            case 4:
                                for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
                                    cur_lp[j] = *ref_lp;
                                lp2 = 4;
                                break;

                            case 9:
                                av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);

                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;

                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;

                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                    }

                    cur_frm_pos += (((width * 2) - blks_width) * 4);
                    flag1 = 0;
                }
                break;

            case 10:
                if(ref_vectors == NULL) {
                    flag1 = 1;

                    for( ; blks_height > 0; blks_height -= 8) {
                        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
                            for(lp2 = 0; lp2 < 4; ) {
                                k = *buf1++;
                                cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                                ref_lp = ((uint32_t *)cur_frm_pos) + width_tbl[(lp2 * 2) - 1];
                                lv1 = ref_lp[0];
                                lv2 = ref_lp[1];
                                if(lp2 == 0 && flag1 != 0) {
#if HAVE_BIGENDIAN
                                    lv1 = lv1 & 0xFF00FF00;
                                    lv1 = (lv1 >> 8) | lv1;
                                    lv2 = lv2 & 0xFF00FF00;
                                    lv2 = (lv2 >> 8) | lv2;
#else
                                    lv1 = lv1 & 0x00FF00FF;
                                    lv1 = (lv1 << 8) | lv1;
                                    lv2 = lv2 & 0x00FF00FF;
                                    lv2 = (lv2 << 8) | lv2;
#endif
                                }

                                switch(correction_type_sp[lp2 & 0x01][k]) {
                                case 0:
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionhighorder_lp[lp2 & 0x01][k]) << 1);
                                    if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        cur_lp[0] = cur_lp[width_tbl[1]];
                                        cur_lp[1] = cur_lp[width_tbl[1]+1];
                                    }
                                    lp2++;
                                    break;

                                case 1:
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(lv1) >> 1) + correctionloworder_lp[lp2 & 0x01][*buf1]) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(lv2) >> 1) + correctionloworder_lp[lp2 & 0x01][k]) << 1);
                                    if(lp2 > 0 || strip->ypos != 0 || flag1 == 0) {
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        cur_lp[0] = cur_lp[width_tbl[1]];
                                        cur_lp[1] = cur_lp[width_tbl[1]+1];
                                    }
                                    buf1++;
                                    lp2++;
                                    break;

                                case 2:
                                    if(lp2 == 0) {
                                        if(flag1 != 0) {
                                            for(i = 0, j = width_tbl[1]; i < 3; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                        } else {
                                            for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                        }
                                        lp2 += 2;
                                    }
                                    break;

                                case 3:
                                    if(lp2 < 2) {
                                        if(lp2 == 0 && flag1 != 0) {
                                            for(i = 0, j = width_tbl[1]; i < 5; i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                            cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                            cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                        } else {
                                            for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
                                                cur_lp[j] = lv1;
                                                cur_lp[j+1] = lv2;
                                            }
                                        }
                                        lp2 = 3;
                                    }
                                    break;

                                case 8:
                                    if(lp2 == 0) {
                                        RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
                                        if(rle_v1 == 1) {
                                            if(flag1 != 0) {
                                                for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
                                                    cur_lp[j] = lv1;
                                                    cur_lp[j+1] = lv2;
                                                }
                                                cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                                cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                            } else {
                                                for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
                                                    cur_lp[j] = lv1;
                                                    cur_lp[j+1] = lv2;
                                                }
                                            }
                                        }
                                        RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                        break;
                                    } else {
                                        rle_v1 = 1;
                                        rle_v2 = (*buf1) - 1;
                                    }
                                case 5:
                                    LP2_CHECK(buf1,rle_v3,lp2)
                                case 4:
                                    if(lp2 == 0 && flag1 != 0) {
                                        for(i = 0, j = width_tbl[1]; i < 7; i++, j += width_tbl[1]) {
                                            cur_lp[j] = lv1;
                                            cur_lp[j+1] = lv2;
                                        }
                                        cur_lp[0] = ((cur_lp[-width_tbl[1]] >> 1) + (cur_lp[width_tbl[1]] >> 1)) & 0xFEFEFEFE;
                                        cur_lp[1] = ((cur_lp[-width_tbl[1]+1] >> 1) + (cur_lp[width_tbl[1]+1] >> 1)) & 0xFEFEFEFE;
                                    } else {
                                        for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
                                            cur_lp[j] = lv1;
                                            cur_lp[j+1] = lv2;
                                        }
                                    }
                                    lp2 = 4;
                                    break;

                                case 6:
                                    lp2 = 4;
                                    break;

                                case 7:
                                    if(lp2 == 0) {
                                        if(rle_v3 != 0)
                                            rle_v3 = 0;
                                        else {
                                            buf1--;
                                            rle_v3 = 1;
                                        }
                                        lp2 = 4;
                                    }
                                    break;

                                case 9:
                                    av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                    lv1 = *buf1;
                                    lv = (lv1 & 0x7F) << 1;
                                    lv += (lv << 8);
                                    lv += (lv << 16);
                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        cur_lp[j] = lv;
                                    LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                    break;

                                default:
                                    return;
                                }
                            }

                            cur_frm_pos += 8;
                        }

                        cur_frm_pos += (((width * 2) - blks_width) * 4);
                        flag1 = 0;
                    }
                } else {
                    for( ; blks_height > 0; blks_height -= 8) {
                        for(lp1 = 0; lp1 < blks_width; lp1 += 2) {
                            for(lp2 = 0; lp2 < 4; ) {
                                k = *buf1++;
                                cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                                ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];

                                switch(correction_type_sp[lp2 & 0x01][k]) {
                                case 0:
                                    lv1 = correctionloworder_lp[lp2 & 0x01][k];
                                    lv2 = correctionhighorder_lp[lp2 & 0x01][k];
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
                                    cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
                                    lp2++;
                                    break;

                                case 1:
                                    lv1 = correctionloworder_lp[lp2 & 0x01][*buf1++];
                                    lv2 = correctionloworder_lp[lp2 & 0x01][k];
                                    cur_lp[0] = av_le2ne32(((av_le2ne32(ref_lp[0]) >> 1) + lv1) << 1);
                                    cur_lp[1] = av_le2ne32(((av_le2ne32(ref_lp[1]) >> 1) + lv2) << 1);
                                    cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + lv1) << 1);
                                    cur_lp[width_tbl[1]+1] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]+1]) >> 1) + lv2) << 1);
                                    lp2++;
                                    break;

                                case 2:
                                    if(lp2 == 0) {
                                        for(i = 0, j = 0; i < 4; i++, j += width_tbl[1]) {
                                            cur_lp[j] = ref_lp[j];
                                            cur_lp[j+1] = ref_lp[j+1];
                                        }
                                        lp2 += 2;
                                    }
                                    break;

                                case 3:
                                    if(lp2 < 2) {
                                        for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1]) {
                                            cur_lp[j] = ref_lp[j];
                                            cur_lp[j+1] = ref_lp[j+1];
                                        }
                                        lp2 = 3;
                                    }
                                    break;

                                case 8:
                                    if(lp2 == 0) {
                                        RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)
                                        for(i = 0, j = 0; i < 8; i++, j += width_tbl[1]) {
                                            ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)ref_frm_pos)[j];
                                            ((uint32_t *)cur_frm_pos)[j+1] = ((uint32_t *)ref_frm_pos)[j+1];
                                        }
                                        RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                        break;
                                    } else {
                                        rle_v1 = 1;
                                        rle_v2 = (*buf1) - 1;
                                    }
                                case 5:
                                case 7:
                                    LP2_CHECK(buf1,rle_v3,lp2)
                                case 6:
                                case 4:
                                    for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1]) {
                                        cur_lp[j] = ref_lp[j];
                                        cur_lp[j+1] = ref_lp[j+1];
                                    }
                                    lp2 = 4;
                                    break;

                                case 9:
                                    av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                    lv1 = *buf1;
                                    lv = (lv1 & 0x7F) << 1;
                                    lv += (lv << 8);
                                    lv += (lv << 16);
                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        ((uint32_t *)cur_frm_pos)[j] = ((uint32_t *)cur_frm_pos)[j+1] = lv;
                                    LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                    break;

                                default:
                                    return;
                                }
                            }

                            cur_frm_pos += 8;
                            ref_frm_pos += 8;
                        }

                        cur_frm_pos += (((width * 2) - blks_width) * 4);
                        ref_frm_pos += (((width * 2) - blks_width) * 4);
                    }
                }
                break;

            case 11:
                if(ref_vectors == NULL)
                    return;

                for( ; blks_height > 0; blks_height -= 8) {
                    for(lp1 = 0; lp1 < blks_width; lp1++) {
                        for(lp2 = 0; lp2 < 4; ) {
                            k = *buf1++;
                            cur_lp = ((uint32_t *)cur_frm_pos) + width_tbl[lp2 * 2];
                            ref_lp = ((uint32_t *)ref_frm_pos) + width_tbl[lp2 * 2];

                            switch(correction_type_sp[lp2 & 0x01][k]) {
                            case 0:
                                cur_lp[0] = av_le2ne32(((av_le2ne32(*ref_lp) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                cur_lp[width_tbl[1]] = av_le2ne32(((av_le2ne32(ref_lp[width_tbl[1]]) >> 1) + correction_lp[lp2 & 0x01][k]) << 1);
                                lp2++;
                                break;

                            case 1:
                                lv1 = (unsigned short)(correction_lp[lp2 & 0x01][*buf1++]);
                                lv2 = (unsigned short)(correction_lp[lp2 & 0x01][k]);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[0]) >> 1) + lv1) << 1);
                                ((unsigned short *)cur_lp)[0] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[1]) >> 1) + lv2) << 1);
                                ((unsigned short *)cur_lp)[1] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]]) >> 1) + lv1) << 1);
                                ((unsigned short *)cur_lp)[width_tbl[2]] = av_le2ne16(res);
                                res = (unsigned short)(((av_le2ne16(((unsigned short *)ref_lp)[width_tbl[2]+1]) >> 1) + lv2) << 1);
                                ((unsigned short *)cur_lp)[width_tbl[2]+1] = av_le2ne16(res);
                                lp2++;
                                break;

                            case 2:
                                if(lp2 == 0) {
                                    for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 += 2;
                                }
                                break;

                            case 3:
                                if(lp2 < 2) {
                                    for(i = 0, j = 0; i < 6 - (lp2 * 2); i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];
                                    lp2 = 3;
                                }
                                break;

                            case 8:
                                if(lp2 == 0) {
                                    RLE_V3_CHECK(buf1,rle_v1,rle_v2,rle_v3)

                                    for(i = 0, j = 0; i < 8; i++, j += width_tbl[1])
                                        cur_lp[j] = ref_lp[j];

                                    RLE_V2_CHECK(buf1,rle_v2, rle_v3,lp2)
                                    break;
                                } else {
                                    rle_v1 = 1;
                                    rle_v2 = (*buf1) - 1;
                                }
                            case 5:
                            case 7:
                                LP2_CHECK(buf1,rle_v3,lp2)
                            case 4:
                            case 6:
                                for(i = 0, j = 0; i < 8 - (lp2 * 2); i++, j += width_tbl[1])
                                    cur_lp[j] = ref_lp[j];
                                lp2 = 4;
                                break;

                            case 9:
                                av_log(s->avctx, AV_LOG_ERROR, "UNTESTED.\n");
                                lv1 = *buf1++;
                                lv = (lv1 & 0x7F) << 1;
                                lv += (lv << 8);
                                lv += (lv << 16);
                                for(i = 0, j = 0; i < 4; i++, j += width_tbl[1])
                                    cur_lp[j] = lv;
                                LV1_CHECK(buf1,rle_v3,lv1,lp2)
                                break;

                            default:
                                return;
                            }
                        }

                        cur_frm_pos += 4;
                        ref_frm_pos += 4;
                    }

                    cur_frm_pos += (((width * 2) - blks_width) * 4);
                    ref_frm_pos += (((width * 2) - blks_width) * 4);
                }
                break;

            default:
                return;
            }
        }

        /* pop finished strips; for a split strip, switch to its second half */
        for( ; strip >= strip_tbl; strip--) {
            if(strip->split_flag != 0) {
                strip->split_flag = 0;
                strip->usl7 = (strip-1)->usl7;

                if(strip->split_direction) {
                    strip->xpos += strip->width;
                    strip->width = (strip-1)->width - strip->width;
                    if(region_160_width <= strip->xpos && width < strip->width + strip->xpos)
                        strip->width = width - strip->xpos;
                } else {
                    strip->ypos += strip->height;
                    strip->height = (strip-1)->height - strip->height;
                }
                break;
            }
        }
    }
}

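/* Initialize the decoder: set the output pixel format and build the lookup
 * tables and internal frame buffers. */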
static av_cold int indeo3_decode_init(AVCodecContext *avctx)
{
    Indeo3DecodeContext *s = avctx->priv_data;
    int ret = 0;

    s->avctx = avctx;
    s->width = avctx->width;
    s->height = avctx->height;
    avctx->pix_fmt = PIX_FMT_YUV410P;

    if (!(ret = build_modpred(s)))
        ret = iv_alloc_frames(s);
    if (ret)
        iv_free_func(s);

    return ret;
}

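/*
 * Parse the frame header, pick the current/reference buffer pair according
 * to the frame flags, and decode the Y, V and U planes in turn. Each plane's
 * data starts with a 32-bit motion-vector count; the vector table that
 * follows is passed to iv_Decode_Chunk() as buf2.
 */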
static int iv_decode_frame(AVCodecContext *avctx,
                           const uint8_t *buf, int buf_size)
{
    Indeo3DecodeContext *s = avctx->priv_data;
    unsigned int image_width, image_height,
                 chroma_width, chroma_height;
    unsigned long flags, cb_offset, data_size,
                  y_offset, v_offset, u_offset, mc_vector_count;
    const uint8_t *hdr_pos, *buf_pos;

    buf_pos = buf;
    buf_pos += 18; /* skip the 18-byte preamble preceding the fields read below */

    flags = bytestream_get_le16(&buf_pos);
    data_size = bytestream_get_le32(&buf_pos);
    cb_offset = *buf_pos++;
    buf_pos += 3; /* bytes not used by this decoder */
    image_height = bytestream_get_le16(&buf_pos);
    image_width = bytestream_get_le16(&buf_pos);

    if(av_image_check_size(image_width, image_height, 0, avctx))
        return -1;
    if (image_width != avctx->width || image_height != avctx->height) {
        int ret;
        avcodec_set_dimensions(avctx, image_width, image_height);
        s->width  = avctx->width;
        s->height = avctx->height;
        ret = iv_alloc_frames(s);
        if (ret < 0) {
            s->width = s->height = 0;
            return ret;
        }
    }

    chroma_height = ((image_height >> 2) + 3) & 0x7ffc;
    chroma_width  = ((image_width  >> 2) + 3) & 0x7ffc;
    y_offset = bytestream_get_le32(&buf_pos);
    v_offset = bytestream_get_le32(&buf_pos);
    u_offset = bytestream_get_le32(&buf_pos);
    buf_pos += 4; /* bytes not used by this decoder */
    hdr_pos = buf_pos;
    if(data_size == 0x80) return 4;

    if(FFMAX3(y_offset, v_offset, u_offset) >= buf_size-16) {
        av_log(s->avctx, AV_LOG_ERROR, "y/u/v offset outside buffer\n");
        return -1;
    }

    if(flags & 0x200) {
        s->cur_frame = s->iv_frame + 1;
        s->ref_frame = s->iv_frame;
    } else {
        s->cur_frame = s->iv_frame;
        s->ref_frame = s->iv_frame + 1;
    }

    buf_pos = buf + 16 + y_offset;
    mc_vector_count = bytestream_get_le32(&buf_pos);
    if(2LL*mc_vector_count >= buf_size-16-y_offset) {
        av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
        return -1;
    }

    iv_Decode_Chunk(s, s->cur_frame->Ybuf, s->ref_frame->Ybuf, image_width,
                    image_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
                    FFMIN(image_width, 160));

    if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
        buf_pos = buf + 16 + v_offset;
        mc_vector_count = bytestream_get_le32(&buf_pos);
        if(2LL*mc_vector_count >= buf_size-16-v_offset) {
            av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
            return -1;
        }

        iv_Decode_Chunk(s, s->cur_frame->Vbuf, s->ref_frame->Vbuf, chroma_width,
                        chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
                        FFMIN(chroma_width, 40));

        buf_pos = buf + 16 + u_offset;
        mc_vector_count = bytestream_get_le32(&buf_pos);
        if(2LL*mc_vector_count >= buf_size-16-u_offset) {
            av_log(s->avctx, AV_LOG_ERROR, "mc_vector_count too large\n");
            return -1;
        }

        iv_Decode_Chunk(s, s->cur_frame->Ubuf, s->ref_frame->Ubuf, chroma_width,
                        chroma_height, buf_pos + mc_vector_count * 2, cb_offset, hdr_pos, buf_pos,
                        FFMIN(chroma_width, 40));
    }

    return 8;
}

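/*
 * Decode one packet: run iv_decode_frame() on the internal buffers, then
 * copy the current Y/U/V planes line by line into a freshly acquired
 * AVFrame (chroma is skipped when CODEC_FLAG_GRAY is set).
 */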
static int indeo3_decode_frame(AVCodecContext *avctx,
                               void *data, int *data_size,
                               AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    Indeo3DecodeContext *s = avctx->priv_data;
    uint8_t *src, *dest;
    int y;

    if (iv_decode_frame(avctx, buf, buf_size) < 0)
        return -1;

    if(s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    s->frame.reference = 0;
    if(avctx->get_buffer(avctx, &s->frame) < 0) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    src  = s->cur_frame->Ybuf;
    dest = s->frame.data[0];
    for (y = 0; y < s->height; y++) {
        memcpy(dest, src, s->cur_frame->y_w);
        src  += s->cur_frame->y_w;
        dest += s->frame.linesize[0];
    }

    if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
        src  = s->cur_frame->Ubuf;
        dest = s->frame.data[1];
        for (y = 0; y < s->height / 4; y++) {
            memcpy(dest, src, s->cur_frame->uv_w);
            src  += s->cur_frame->uv_w;
            dest += s->frame.linesize[1];
        }

        src  = s->cur_frame->Vbuf;
        dest = s->frame.data[2];
        for (y = 0; y < s->height / 4; y++) {
            memcpy(dest, src, s->cur_frame->uv_w);
            src  += s->cur_frame->uv_w;
            dest += s->frame.linesize[2];
        }
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    return buf_size;
}

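/* Release all memory allocated by the decoder. */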
static av_cold int indeo3_decode_end(AVCodecContext *avctx)
{
    Indeo3DecodeContext *s = avctx->priv_data;

    iv_free_func(s);

    return 0;
}

AVCodec ff_indeo3_decoder = {
    .name           = "indeo3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_INDEO3,
    .priv_data_size = sizeof(Indeo3DecodeContext),
    .init           = indeo3_decode_init,
    .close          = indeo3_decode_end,
    .decode         = indeo3_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
};