FFmpeg  2.6.3
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
svq3.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003 The FFmpeg Project
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /*
22  * How to use this decoder:
23  * SVQ3 data is transported within Apple Quicktime files. Quicktime files
24  * have stsd atoms to describe media trak properties. A stsd atom for a
25  * video trak contains 1 or more ImageDescription atoms. These atoms begin
26  * with the 4-byte length of the atom followed by the codec fourcc. Some
27  * decoders need information in this atom to operate correctly. Such
28  * is the case with SVQ3. In order to get the best use out of this decoder,
29  * the calling app must make the SVQ3 ImageDescription atom available
30  * via the AVCodecContext's extradata[_size] field:
31  *
32  * AVCodecContext.extradata = pointer to ImageDescription, first characters
33  * are expected to be 'S', 'V', 'Q', and '3', NOT the 4-byte atom length
34  * AVCodecContext.extradata_size = size of ImageDescription atom memory
35  * buffer (which will be the same as the ImageDescription atom size field
36  * from the QT file, minus 4 bytes since the length is missing)
37  *
38  * You will know you have these parameters passed correctly when the decoder
39  * correctly decodes this file:
40  * http://samples.mplayerhq.hu/V-codecs/SVQ3/Vertical400kbit.sorenson3.mov
41  */
42 
43 #include <inttypes.h>
44 
45 #include "libavutil/attributes.h"
46 #include "internal.h"
47 #include "avcodec.h"
48 #include "mpegutils.h"
49 #include "h264.h"
50 
51 #include "h264data.h" // FIXME FIXME FIXME
52 
53 #include "h264_mvpred.h"
54 #include "golomb.h"
55 #include "hpeldsp.h"
56 #include "rectangle.h"
57 #include "tpeldsp.h"
58 #include "vdpau_internal.h"
59 
60 #if CONFIG_ZLIB
61 #include <zlib.h>
62 #endif
63 
64 #include "svq1.h"
65 #include "svq3.h"
66 
67 /**
68  * @file
69  * svq3 decoder.
70  */
71 
typedef struct SVQ3Context {
    /* NOTE(review): most members of this context are elided in this
     * Doxygen extract (rendered lines 73-90); only two are visible. */
    uint32_t watermark_key;   ///< XOR key derived from the watermark checksum; applied to slice data in the slice-header parser
    int buf_size;             ///< not referenced in the visible portion of this extract
} SVQ3Context;
92 
93 #define FULLPEL_MODE 1
94 #define HALFPEL_MODE 2
95 #define THIRDPEL_MODE 3
96 #define PREDICT_MODE 4
97 
/* dual scan (from some older h264 draft)
 *  o-->o-->o   o
 *          |  /|
 *  o   o   o / o
 *  | / |   |/  |
 *  o   o   o   o
 *    /
 *  o-->o-->o-->o
 */
/* Coefficient scan order for 4x4 residual blocks; each entry is an
 * x + y * 4 position inside the block. */
static const uint8_t svq3_scan[16] = {
    0 + 0 * 4, 1 + 0 * 4, 2 + 0 * 4, 2 + 1 * 4,
    2 + 2 * 4, 3 + 0 * 4, 3 + 1 * 4, 3 + 2 * 4,
    0 + 1 * 4, 0 + 2 * 4, 1 + 1 * 4, 1 + 2 * 4,
    0 + 3 * 4, 1 + 3 * 4, 2 + 3 * 4, 3 + 3 * 4,
};
113 
/* Scan order for the 16 luma DC coefficients; each entry is an offset
 * (block * 16 + row * 64) into the macroblock coefficient layout. */
static const uint8_t luma_dc_zigzag_scan[16] = {
    0 * 16 + 0 * 64, 1 * 16 + 0 * 64, 2 * 16 + 0 * 64, 0 * 16 + 2 * 64,
    3 * 16 + 0 * 64, 0 * 16 + 1 * 64, 1 * 16 + 1 * 64, 2 * 16 + 1 * 64,
    1 * 16 + 2 * 64, 2 * 16 + 2 * 64, 3 * 16 + 2 * 64, 0 * 16 + 3 * 64,
    3 * 16 + 1 * 64, 1 * 16 + 3 * 64, 2 * 16 + 3 * 64, 3 * 16 + 3 * 64,
};
120 
/* Maps each of the 25 luma-prediction VLC codes to two selector values
 * (one per coded 4x4 block) used as the last index of svq3_pred_1;
 * entries are laid out along anti-diagonals. */
static const uint8_t svq3_pred_0[25][2] = {
    { 0, 0 },
    { 1, 0 }, { 0, 1 },
    { 0, 2 }, { 1, 1 }, { 2, 0 },
    { 3, 0 }, { 2, 1 }, { 1, 2 }, { 0, 3 },
    { 0, 4 }, { 1, 3 }, { 2, 2 }, { 3, 1 }, { 4, 0 },
    { 4, 1 }, { 3, 2 }, { 2, 3 }, { 1, 4 },
    { 2, 4 }, { 3, 3 }, { 4, 2 },
    { 4, 3 }, { 3, 4 },
    { 4, 4 }
};
132 
/* Context-dependent intra 4x4 mode table: svq3_pred_1[top + 1][left + 1]
 * lists candidate modes given the top and left neighbour modes (the +1
 * offsets account for -1 = unavailable); -1 entries mark invalid
 * selections, rejected as "weird prediction" in svq3_decode_mb(). */
static const int8_t svq3_pred_1[6][6][5] = {
    { { 2, -1, -1, -1, -1 }, { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 },
      { 2, 1, -1, -1, -1 }, { 1, 2, -1, -1, -1 }, { 1, 2, -1, -1, -1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 4, 3 }, { 0, 1, 2, 4, 3 },
      { 0, 2, 1, 4, 3 }, { 2, 0, 1, 3, 4 }, { 0, 4, 2, 1, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 1, 0, 4, 3 }, { 1, 2, 4, 0, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 4, 3, 0 }, { 1, 2, 4, 0, 3 } },
    { { 2, 0, -1, -1, -1 }, { 2, 0, 1, 4, 3 }, { 1, 2, 0, 4, 3 },
      { 2, 1, 0, 4, 3 }, { 2, 1, 3, 4, 0 }, { 2, 4, 1, 0, 3 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 1, 3, 4 }, { 1, 2, 3, 0, 4 },
      { 2, 0, 1, 3, 4 }, { 2, 1, 3, 0, 4 }, { 2, 0, 4, 3, 1 } },
    { { 0, 2, -1, -1, -1 }, { 0, 2, 4, 1, 3 }, { 1, 4, 2, 0, 3 },
      { 4, 2, 0, 1, 3 }, { 2, 0, 1, 4, 3 }, { 4, 2, 1, 0, 3 } },
};
147 
/* Run/level pairs for coefficient VLC codes < 16, indexed by the 'intra'
 * flag computed in svq3_decode_block() and then by the code value. */
static const struct {
    /* NOTE(review): the member declarations (the .run/.level fields read
     * by svq3_decode_block) are elided in this extract (rendered lines
     * 149-150). */
} svq3_dct_tables[2][16] = {
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 2, 1 }, { 0, 2 }, { 3, 1 }, { 4, 1 }, { 5, 1 },
      { 0, 3 }, { 1, 2 }, { 2, 2 }, { 6, 1 }, { 7, 1 }, { 8, 1 }, { 9, 1 }, { 0, 4 } },
    { { 0, 0 }, { 0, 1 }, { 1, 1 }, { 0, 2 }, { 2, 1 }, { 0, 3 }, { 0, 4 }, { 0, 5 },
      { 3, 1 }, { 4, 1 }, { 1, 2 }, { 1, 3 }, { 0, 6 }, { 0, 7 }, { 0, 8 }, { 0, 9 } }
};
157 
/* Dequantization multipliers indexed by qp (0..31); products are rounded
 * with +0x80000 and shifted right by 20 in the IDCT helpers. */
static const uint32_t svq3_dequant_coeff[32] = {
     3881,  4351,  4890,  5481,   6154,   6914,   7761,   8718,
     9781, 10987, 12339, 13828,  15523,  17435,  19561,  21873,
    24552, 27656, 30847, 34870,  38807,  43747,  49103,  54683,
    61694, 68745, 77615, 89113, 100253, 109366, 126635, 141533
};
164 
165 static int svq3_decode_end(AVCodecContext *avctx);
166 
167 void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
168 {
169  const int qmul = svq3_dequant_coeff[qp];
170 #define stride 16
171  int i;
172  int temp[16];
173  static const uint8_t x_offset[4] = { 0, 1 * stride, 4 * stride, 5 * stride };
174 
175  for (i = 0; i < 4; i++) {
176  const int z0 = 13 * (input[4 * i + 0] + input[4 * i + 2]);
177  const int z1 = 13 * (input[4 * i + 0] - input[4 * i + 2]);
178  const int z2 = 7 * input[4 * i + 1] - 17 * input[4 * i + 3];
179  const int z3 = 17 * input[4 * i + 1] + 7 * input[4 * i + 3];
180 
181  temp[4 * i + 0] = z0 + z3;
182  temp[4 * i + 1] = z1 + z2;
183  temp[4 * i + 2] = z1 - z2;
184  temp[4 * i + 3] = z0 - z3;
185  }
186 
187  for (i = 0; i < 4; i++) {
188  const int offset = x_offset[i];
189  const int z0 = 13 * (temp[4 * 0 + i] + temp[4 * 2 + i]);
190  const int z1 = 13 * (temp[4 * 0 + i] - temp[4 * 2 + i]);
191  const int z2 = 7 * temp[4 * 1 + i] - 17 * temp[4 * 3 + i];
192  const int z3 = 17 * temp[4 * 1 + i] + 7 * temp[4 * 3 + i];
193 
194  output[stride * 0 + offset] = (z0 + z3) * qmul + 0x80000 >> 20;
195  output[stride * 2 + offset] = (z1 + z2) * qmul + 0x80000 >> 20;
196  output[stride * 8 + offset] = (z1 - z2) * qmul + 0x80000 >> 20;
197  output[stride * 10 + offset] = (z0 - z3) * qmul + 0x80000 >> 20;
198  }
199 }
200 #undef stride
201 
202 void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block,
203  int stride, int qp, int dc)
204 {
205  const int qmul = svq3_dequant_coeff[qp];
206  int i;
207 
208  if (dc) {
209  dc = 13 * 13 * (dc == 1 ? 1538 * block[0]
210  : qmul * (block[0] >> 3) / 2);
211  block[0] = 0;
212  }
213 
214  for (i = 0; i < 4; i++) {
215  const int z0 = 13 * (block[0 + 4 * i] + block[2 + 4 * i]);
216  const int z1 = 13 * (block[0 + 4 * i] - block[2 + 4 * i]);
217  const int z2 = 7 * block[1 + 4 * i] - 17 * block[3 + 4 * i];
218  const int z3 = 17 * block[1 + 4 * i] + 7 * block[3 + 4 * i];
219 
220  block[0 + 4 * i] = z0 + z3;
221  block[1 + 4 * i] = z1 + z2;
222  block[2 + 4 * i] = z1 - z2;
223  block[3 + 4 * i] = z0 - z3;
224  }
225 
226  for (i = 0; i < 4; i++) {
227  const int z0 = 13 * (block[i + 4 * 0] + block[i + 4 * 2]);
228  const int z1 = 13 * (block[i + 4 * 0] - block[i + 4 * 2]);
229  const int z2 = 7 * block[i + 4 * 1] - 17 * block[i + 4 * 3];
230  const int z3 = 17 * block[i + 4 * 1] + 7 * block[i + 4 * 3];
231  const int rr = (dc + 0x80000);
232 
233  dst[i + stride * 0] = av_clip_uint8(dst[i + stride * 0] + ((z0 + z3) * qmul + rr >> 20));
234  dst[i + stride * 1] = av_clip_uint8(dst[i + stride * 1] + ((z1 + z2) * qmul + rr >> 20));
235  dst[i + stride * 2] = av_clip_uint8(dst[i + stride * 2] + ((z1 - z2) * qmul + rr >> 20));
236  dst[i + stride * 3] = av_clip_uint8(dst[i + stride * 3] + ((z0 - z3) * qmul + rr >> 20));
237  }
238 
239  memset(block, 0, 16 * sizeof(int16_t));
240 }
241 
/* Decode run/level residual coefficients of one block.
 * 'type' selects the scan pattern and coding tables (3 = chroma DC per
 * the call sites in svq3_decode_mb); 'index' is the first coefficient
 * position to fill. Returns 0 on success, -1 on a corrupt bitstream. */
static inline int svq3_decode_block(GetBitContext *gb, int16_t *block,
                                    int index, const int type)
{
    static const uint8_t *const scan_patterns[4] =
        /* NOTE(review): the initializer of scan_patterns is elided in
         * this extract (rendered line 246). */

    int run, level, sign, limit;
    unsigned vlc;
    /* 3 * type >> 2 yields 0 for types 0-1 and 1 for type 2; it both
     * flags "intra" handling and indexes svq3_dct_tables */
    const int intra = 3 * type >> 2;
    const uint8_t *const scan = scan_patterns[type];

    /* type 2 decodes in two passes (limit 8 then 16); others do one */
    for (limit = (16 >> intra); index < 16; index = limit, limit += 8) {
        /* vlc == 0 is the end-of-block code */
        for (; (vlc = svq3_get_ue_golomb(gb)) != 0; index++) {
            if ((int32_t)vlc < 0)
                return -1;

            /* LSB carries the sign; the remaining bits code run/level */
            sign = (vlc & 1) ? 0 : -1;
            vlc  = vlc + 1 >> 1;

            if (type == 3) {
                if (vlc < 3) {
                    run   = 0;
                    level = vlc;
                } else if (vlc < 4) {
                    run   = 1;
                    level = 1;
                } else {
                    run   = vlc & 0x3;
                    level = (vlc + 9 >> 2) - run;
                }
            } else {
                if (vlc < 16U) {
                    run   = svq3_dct_tables[intra][vlc].run;
                    level = svq3_dct_tables[intra][vlc].level;
                } else if (intra) {
                    /* escape coding for codes beyond the table */
                    run   = vlc & 0x7;
                    level = (vlc >> 3) + ((run == 0) ? 8 : ((run < 2) ? 2 : ((run < 5) ? 0 : -1)));
                } else {
                    run   = vlc & 0xF;
                    level = (vlc >> 4) + ((run == 0) ? 4 : ((run < 3) ? 2 : ((run < 10) ? 1 : 0)));
                }
            }

            /* a run past the pass limit means a corrupt stream */
            if ((index += run) >= limit)
                return -1;

            /* (level ^ sign) - sign negates the level when sign == -1 */
            block[scan[index]] = (level ^ sign) - sign;
        }

        if (type != 2) {
            break;
        }
    }

    return 0;
}
299 
/* Motion-compensate one partition: copy (or average, if avg) a
 * width x height area from the reference picture selected by dir
 * (0 = last_pic, 1 = next_pic) at integer offset (mx, my) relative to
 * the destination position (x, y), with the sub-pel phase dxy applied by
 * either the thirdpel or halfpel DSP routines. */
static inline void svq3_mc_dir_part(SVQ3Context *s,
                                    int x, int y, int width, int height,
                                    int mx, int my, int dxy,
                                    int thirdpel, int dir, int avg)
{
    H264Context *h = &s->h;
    const H264Picture *pic = (dir == 0) ? s->last_pic : s->next_pic;
    uint8_t *src, *dest;
    int i, emu = 0;
    int blocksize = 2 - (width >> 3); // 16->0, 8->1, 4->2

    mx += x;
    my += y;

    /* if the source area sticks out of the frame, clamp it and read
     * through the edge-emulation buffer below */
    if (mx < 0 || mx >= s->h_edge_pos - width - 1 ||
        my < 0 || my >= s->v_edge_pos - height - 1) {
        emu = 1;
        mx = av_clip(mx, -16, s->h_edge_pos - width + 15);
        my = av_clip(my, -16, s->v_edge_pos - height + 15);
    }

    /* form component predictions */
    dest = h->cur_pic.f.data[0] + x + y * h->linesize;
    src  = pic->f.data[0] + mx + my * h->linesize;

    if (emu) {
        /* NOTE(review): the emulated_edge_mc() call that fills
         * h->edge_emu_buffer is elided in this extract (rendered line
         * 326); only its trailing arguments are visible. */
                         h->linesize, h->linesize,
                         width + 1, height + 1,
                         mx, my, s->h_edge_pos, s->v_edge_pos);
        src = h->edge_emu_buffer;
    }
    if (thirdpel)
        (avg ? s->tdsp.avg_tpel_pixels_tab
             : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src, h->linesize,
                                                 width, height);
    else
        (avg ? s->hdsp.avg_pixels_tab
             : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src, h->linesize,
                                                       height);

    /* chroma planes, unless grayscale-only decoding is requested */
    if (!(h->flags & CODEC_FLAG_GRAY)) {
        /* halve the luma offsets, rounding toward the block origin */
        mx     = mx + (mx < (int) x) >> 1;
        my     = my + (my < (int) y) >> 1;
        width  = width >> 1;
        height = height >> 1;
        blocksize++;

        for (i = 1; i < 3; i++) {
            dest = h->cur_pic.f.data[i] + (x >> 1) + (y >> 1) * h->uvlinesize;
            src  = pic->f.data[i] + mx + my * h->uvlinesize;

            if (emu) {
                /* NOTE(review): emulated_edge_mc() call elided in this
                 * extract (rendered line 353). */
                                 h->uvlinesize, h->uvlinesize,
                                 width + 1, height + 1,
                                 mx, my, (s->h_edge_pos >> 1),
                                 s->v_edge_pos >> 1);
                src = h->edge_emu_buffer;
            }
            if (thirdpel)
                (avg ? s->tdsp.avg_tpel_pixels_tab
                     : s->tdsp.put_tpel_pixels_tab)[dxy](dest, src,
                                                         h->uvlinesize,
                                                         width, height);
            else
                (avg ? s->hdsp.avg_pixels_tab
                     : s->hdsp.put_pixels_tab)[blocksize][dxy](dest, src,
                                                               h->uvlinesize,
                                                               height);
        }
    }
}
373 
/* Decode the motion vectors for every partition of the current MB and
 * run motion compensation for each. 'size' selects the partition
 * geometry, 'dir' the prediction direction (0 = forward, 1 = backward);
 * in PREDICT_MODE the MVs are derived by temporal scaling from the
 * co-located next-picture block instead of reading differentials.
 * Returns 0 on success, -1 on an invalid MV VLC. */
static inline int svq3_mc_dir(SVQ3Context *s, int size, int mode,
                              int dir, int avg)
{
    int i, j, k, mx, my, dx, dy, x, y;
    H264Context *h        = &s->h;
    const int part_width  = ((size & 5) == 4) ? 4 : 16 >> (size & 1);
    const int part_height = 16 >> ((unsigned)(size + 1) / 3);
    const int extra_width = (mode == PREDICT_MODE) ? -16 * 6 : 0;
    /* edge positions in 1/6-pel units for MV clipping */
    const int h_edge_pos  = 6 * (s->h_edge_pos - part_width) - extra_width;
    const int v_edge_pos  = 6 * (s->v_edge_pos - part_height) - extra_width;

    for (i = 0; i < 16; i += part_height)
        for (j = 0; j < 16; j += part_width) {
            const int b_xy = (4 * h->mb_x + (j >> 2)) +
                             (4 * h->mb_y + (i >> 2)) * h->b_stride;
            int dxy;
            x = 16 * h->mb_x + j;
            y = 16 * h->mb_y + i;
            /* k is the scan8-style partition index within the MB */
            k = (j >> 2 & 1) + (i >> 1 & 2) +
                (j >> 1 & 4) + (i & 8);

            if (mode != PREDICT_MODE) {
                pred_motion(h, k, part_width >> 2, dir, 1, &mx, &my);
            } else {
                /* temporal scaling of the co-located MV by the ratio of
                 * frame-number distances */
                mx = s->next_pic->motion_val[0][b_xy][0] << 1;
                my = s->next_pic->motion_val[0][b_xy][1] << 1;

                if (dir == 0) {
                    mx = mx * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * h->frame_num_offset /
                         h->prev_frame_num_offset + 1 >> 1;
                } else {
                    mx = mx * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                    my = my * (h->frame_num_offset - h->prev_frame_num_offset) /
                         h->prev_frame_num_offset + 1 >> 1;
                }
            }

            /* clip motion vector prediction to frame border */
            mx = av_clip(mx, extra_width - 6 * x, h_edge_pos - 6 * x);
            my = av_clip(my, extra_width - 6 * y, v_edge_pos - 6 * y);

            /* get (optional) motion vector differential */
            if (mode == PREDICT_MODE) {
                dx = dy = 0;
            } else {
                dy = svq3_get_se_golomb(&h->gb);
                dx = svq3_get_se_golomb(&h->gb);

                if (dx == INVALID_VLC || dy == INVALID_VLC) {
                    av_log(h->avctx, AV_LOG_ERROR, "invalid MV vlc\n");
                    return -1;
                }
            }

            /* compute motion vector: convert the 1/6-pel prediction to
             * the precision of the chosen mode, apply the differential,
             * then scale back to 1/6-pel for the cache/write-back */
            if (mode == THIRDPEL_MODE) {
                int fx, fy;
                mx  = (mx + 1 >> 1) + dx;
                my  = (my + 1 >> 1) + dy;
                fx  = (unsigned)(mx + 0x3000) / 3 - 0x1000;
                fy  = (unsigned)(my + 0x3000) / 3 - 0x1000;
                dxy = (mx - 3 * fx) + 4 * (my - 3 * fy);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 fx, fy, dxy, 1, dir, avg);
                mx += mx;
                my += my;
            } else if (mode == HALFPEL_MODE || mode == PREDICT_MODE) {
                mx  = (unsigned)(mx + 1 + 0x3000) / 3 + dx - 0x1000;
                my  = (unsigned)(my + 1 + 0x3000) / 3 + dy - 0x1000;
                dxy = (mx & 1) + 2 * (my & 1);

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx >> 1, my >> 1, dxy, 0, dir, avg);
                mx *= 3;
                my *= 3;
            } else {
                mx = (unsigned)(mx + 3 + 0x6000) / 6 + dx - 0x1000;
                my = (unsigned)(my + 3 + 0x6000) / 6 + dy - 0x1000;

                svq3_mc_dir_part(s, x, y, part_width, part_height,
                                 mx, my, 0, 0, dir, avg);
                mx *= 6;
                my *= 6;
            }

            /* update mv_cache so pred_motion() of later partitions sees
             * this partition's MV */
            if (mode != PREDICT_MODE) {
                int32_t mv = pack16to32(mx, my);

                if (part_height == 8 && i < 8) {
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1 * 8], mv);

                    if (part_width == 8 && j < 8)
                        AV_WN32A(h->mv_cache[dir][scan8[k] + 1 + 1 * 8], mv);
                }
                if (part_width == 8 && j < 8)
                    AV_WN32A(h->mv_cache[dir][scan8[k] + 1], mv);
                if (part_width == 4 || part_height == 4)
                    AV_WN32A(h->mv_cache[dir][scan8[k]], mv);
            }

            /* write back motion vectors */
            fill_rectangle(h->cur_pic.motion_val[dir][b_xy],
                           part_width >> 2, part_height >> 2, h->b_stride,
                           pack16to32(mx, my), 4);
        }

    return 0;
}
487 
/* Decode one macroblock: classify mb_type (SKIP / INTER / INTRA4x4 /
 * INTRA16x16), decode prediction modes or motion vectors, then the
 * residual coefficients. Returns 0 on success, negative on error. */
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
{
    H264Context *h = &s->h;
    int i, j, k, m, dir, mode;
    int cbp = 0;
    uint32_t vlc;
    int8_t *top, *left;
    const int mb_xy = h->mb_xy;
    const int b_xy  = 4 * h->mb_x + 4 * h->mb_y * h->b_stride; // in 4x4-block units

    h->top_samples_available      = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
    h->left_samples_available     = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
    h->topright_samples_available = 0xFFFF;

    if (mb_type == 0) {           /* SKIP */
        if (h->pict_type == AV_PICTURE_TYPE_P ||
            s->next_pic->mb_type[mb_xy] == -1) {
            /* plain skip: zero-MV copy (both directions for B frames) */
            svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                             0, 0, 0, 0, 0, 0);

            if (h->pict_type == AV_PICTURE_TYPE_B)
                svq3_mc_dir_part(s, 16 * h->mb_x, 16 * h->mb_y, 16, 16,
                                 0, 0, 0, 0, 1, 1);

            mb_type = MB_TYPE_SKIP;
        } else {
            /* B direct: derive both MVs from the co-located next-pic MB */
            mb_type = FFMIN(s->next_pic->mb_type[mb_xy], 6);
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 0, 0) < 0)
                return -1;
            if (svq3_mc_dir(s, mb_type, PREDICT_MODE, 1, 1) < 0)
                return -1;

            mb_type = MB_TYPE_16x16;
        }
    } else if (mb_type < 8) {     /* INTER */
        /* one bit selects between the two enabled sub-pel precisions */
        if (s->thirdpel_flag && s->halfpel_flag == !get_bits1(&h->gb))
            mode = THIRDPEL_MODE;
        else if (s->halfpel_flag &&
                 s->thirdpel_flag == !get_bits1(&h->gb))
            mode = HALFPEL_MODE;
        else
            mode = FULLPEL_MODE;

        /* fill caches */
        /* note ref_cache should contain here:
         * ????????
         * ???11111
         * N??11111
         * N??11111
         * N??11111
         */

        for (m = 0; m < 2; m++) {
            /* left-neighbour MVs; zeroed when the left MB is intra
             * (its pred-mode entry is -1) */
            if (h->mb_x > 0 && h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6] != -1) {
                for (i = 0; i < 4; i++)
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 + i * 8],
                              h->cur_pic.motion_val[m][b_xy - 1 + i * h->b_stride]);
            } else {
                for (i = 0; i < 4; i++)
                    AV_ZERO32(h->mv_cache[m][scan8[0] - 1 + i * 8]);
            }
            if (h->mb_y > 0) {
                memcpy(h->mv_cache[m][scan8[0] - 1 * 8],
                       h->cur_pic.motion_val[m][b_xy - h->b_stride],
                       4 * 2 * sizeof(int16_t));
                memset(&h->ref_cache[m][scan8[0] - 1 * 8],
                       (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1, 4);

                if (h->mb_x < h->mb_width - 1) {
                    AV_COPY32(h->mv_cache[m][scan8[0] + 4 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride + 4]);
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride + 1] + 6] == -1 ||
                         h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride]] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] + 4 - 1 * 8] = PART_NOT_AVAILABLE;
                if (h->mb_x > 0) {
                    AV_COPY32(h->mv_cache[m][scan8[0] - 1 - 1 * 8],
                              h->cur_pic.motion_val[m][b_xy - h->b_stride - 1]);
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] =
                        (h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] == -1) ? PART_NOT_AVAILABLE : 1;
                } else
                    h->ref_cache[m][scan8[0] - 1 - 1 * 8] = PART_NOT_AVAILABLE;
            } else
                memset(&h->ref_cache[m][scan8[0] - 1 * 8 - 1],
                       PART_NOT_AVAILABLE, 8);

            /* only direction 0 exists outside B frames */
            if (h->pict_type != AV_PICTURE_TYPE_B)
                break;
        }

        /* decode motion vector(s) and form prediction(s) */
        if (h->pict_type == AV_PICTURE_TYPE_P) {
            if (svq3_mc_dir(s, mb_type - 1, mode, 0, 0) < 0)
                return -1;
        } else {        /* AV_PICTURE_TYPE_B */
            if (mb_type != 2) {
                if (svq3_mc_dir(s, 0, mode, 0, 0) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
            if (mb_type != 1) {
                if (svq3_mc_dir(s, 0, mode, 1, mb_type == 3) < 0)
                    return -1;
            } else {
                for (i = 0; i < 4; i++)
                    memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                           0, 4 * 2 * sizeof(int16_t));
            }
        }

        mb_type = MB_TYPE_16x16;
    } else if (mb_type == 8 || mb_type == 33) {   /* INTRA4x4 */
        memset(h->intra4x4_pred_mode_cache, -1, 8 * 5 * sizeof(int8_t));

        if (mb_type == 8) {
            /* import prediction modes of the left and top neighbours */
            if (h->mb_x > 0) {
                for (i = 0; i < 4; i++)
                    h->intra4x4_pred_mode_cache[scan8[0] - 1 + i * 8] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - 1] + 6 - i];
                if (h->intra4x4_pred_mode_cache[scan8[0] - 1] == -1)
                    h->left_samples_available = 0x5F5F;
            }
            if (h->mb_y > 0) {
                h->intra4x4_pred_mode_cache[4 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 0];
                h->intra4x4_pred_mode_cache[5 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 1];
                h->intra4x4_pred_mode_cache[6 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 2];
                h->intra4x4_pred_mode_cache[7 + 8 * 0] = h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride] + 3];

                if (h->intra4x4_pred_mode_cache[4 + 8 * 0] == -1)
                    h->top_samples_available = 0x33FF;
            }

            /* decode prediction codes for luma blocks */
            for (i = 0; i < 16; i += 2) {
                vlc = svq3_get_ue_golomb(&h->gb);

                if (vlc >= 25U) {
                    /* NOTE(review): the av_log() call reporting the bad
                     * code is elided in this extract (rendered line 628). */
                           "luma prediction:%"PRIu32"\n", vlc);
                    return -1;
                }

                left = &h->intra4x4_pred_mode_cache[scan8[i] - 1];
                top  = &h->intra4x4_pred_mode_cache[scan8[i] - 8];

                /* each VLC codes two 4x4 modes, chosen contextually from
                 * the top and left neighbour modes */
                left[1] = svq3_pred_1[top[0] + 1][left[0] + 1][svq3_pred_0[vlc][0]];
                left[2] = svq3_pred_1[top[1] + 1][left[1] + 1][svq3_pred_0[vlc][1]];

                if (left[1] == -1 || left[2] == -1) {
                    av_log(h->avctx, AV_LOG_ERROR, "weird prediction\n");
                    return -1;
                }
            }
        } else {    /* mb_type == 33, DC_128_PRED block type */
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_PRED, 4);
        }

        /* NOTE(review): one statement is elided here in this extract
         * (rendered line 649). */

        if (mb_type == 8) {
            /* NOTE(review): one statement is elided here in this extract
             * (rendered line 652). */

            h->top_samples_available  = (h->mb_y == 0) ? 0x33FF : 0xFFFF;
            h->left_samples_available = (h->mb_x == 0) ? 0x5F5F : 0xFFFF;
        } else {
            for (i = 0; i < 4; i++)
                memset(&h->intra4x4_pred_mode_cache[scan8[0] + 8 * i], DC_128_PRED, 4);

            h->top_samples_available  = 0x33FF;
            h->left_samples_available = 0x5F5F;
        }

        mb_type = MB_TYPE_INTRA4x4;
    } else {    /* INTRA16x16 */
        dir = i_mb_type_info[mb_type - 8].pred_mode;
        dir = (dir >> 1) ^ 3 * (dir & 1) ^ 1;

        if ((h->intra16x16_pred_mode = ff_h264_check_intra_pred_mode(h, dir, 0)) < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "ff_h264_check_intra_pred_mode < 0\n");
            return h->intra16x16_pred_mode;
        }

        cbp     = i_mb_type_info[mb_type - 8].cbp;
        mb_type = MB_TYPE_INTRA16x16;
    }

    /* in P/B pictures, non-inter MBs still need zeroed MVs so later
     * prediction reads consistent data */
    if (!IS_INTER(mb_type) && h->pict_type != AV_PICTURE_TYPE_I) {
        for (i = 0; i < 4; i++)
            memset(h->cur_pic.motion_val[0][b_xy + i * h->b_stride],
                   0, 4 * 2 * sizeof(int16_t));
        if (h->pict_type == AV_PICTURE_TYPE_B) {
            for (i = 0; i < 4; i++)
                memset(h->cur_pic.motion_val[1][b_xy + i * h->b_stride],
                       0, 4 * 2 * sizeof(int16_t));
        }
    }
    if (!IS_INTRA4x4(mb_type)) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy], DC_PRED, 8);
    }
    if (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B) {
        memset(h->non_zero_count_cache + 8, 0, 14 * 8 * sizeof(uint8_t));
    }

    if (!IS_INTRA16x16(mb_type) &&
        (!IS_SKIP(mb_type) || h->pict_type == AV_PICTURE_TYPE_B)) {
        if ((vlc = svq3_get_ue_golomb(&h->gb)) >= 48U){
            av_log(h->avctx, AV_LOG_ERROR, "cbp_vlc=%"PRIu32"\n", vlc);
            return -1;
        }

        cbp = IS_INTRA(mb_type) ? golomb_to_intra4x4_cbp[vlc]
                                : golomb_to_inter_cbp[vlc];
    }
    if (IS_INTRA16x16(mb_type) ||
        (h->pict_type != AV_PICTURE_TYPE_I && s->adaptive_quant && cbp)) {
        h->qscale += svq3_get_se_golomb(&h->gb);

        /* unsigned compare also rejects negative qscale */
        if (h->qscale > 31u) {
            av_log(h->avctx, AV_LOG_ERROR, "qscale:%d\n", h->qscale);
            return -1;
        }
    }
    if (IS_INTRA16x16(mb_type)) {
        AV_ZERO128(h->mb_luma_dc[0] + 0);
        AV_ZERO128(h->mb_luma_dc[0] + 8);
        if (svq3_decode_block(&h->gb, h->mb_luma_dc[0], 0, 1)) {
            /* NOTE(review): av_log() call elided in this extract
             * (rendered line 718). */
                   "error while decoding intra luma dc\n");
            return -1;
        }
    }

    if (cbp) {
        const int index = IS_INTRA16x16(mb_type) ? 1 : 0;
        const int type  = ((h->qscale < 24 && IS_INTRA4x4(mb_type)) ? 2 : 1);

        for (i = 0; i < 4; i++)
            if ((cbp & (1 << i))) {
                for (j = 0; j < 4; j++) {
                    /* intra16x16 uses a different 4x4 block ordering */
                    k = index ? (1 * (j & 1) + 2 * (i & 1) +
                                 2 * (j & 2) + 4 * (i & 2))
                              : (4 * i + j);
                    h->non_zero_count_cache[scan8[k]] = 1;

                    if (svq3_decode_block(&h->gb, &h->mb[16 * k], index, type)) {
                        /* NOTE(review): av_log() call elided in this
                         * extract (rendered line 737). */
                               "error while decoding block\n");
                        return -1;
                    }
                }
            }

        if ((cbp & 0x30)) {
            for (i = 1; i < 3; ++i)
                if (svq3_decode_block(&h->gb, &h->mb[16 * 16 * i], 0, 3)) {
                    /* NOTE(review): av_log() call elided in this extract
                     * (rendered line 747). */
                           "error while decoding chroma dc block\n");
                    return -1;
                }

            if ((cbp & 0x20)) {
                for (i = 1; i < 3; i++) {
                    for (j = 0; j < 4; j++) {
                        k = 16 * i + j;
                        h->non_zero_count_cache[scan8[k]] = 1;

                        if (svq3_decode_block(&h->gb, &h->mb[16 * k], 1, 1)) {
                            /* NOTE(review): av_log() call elided in this
                             * extract (rendered line 759). */
                                   "error while decoding chroma ac block\n");
                            return -1;
                        }
                    }
                }
            }
        }
    }

    h->cbp                    = cbp;
    h->cur_pic.mb_type[mb_xy] = mb_type;

    if (IS_INTRA(mb_type))
        /* NOTE(review): the statement guarded by this condition is
         * elided in this extract (rendered line 773); as written here
         * the following return would be its body — confirm against the
         * full source. */

    return 0;
}
777 
/* NOTE(review): the function signature line is elided in this extract
 * (rendered line 778). This is the SVQ3 slice-header parser; the body
 * reads `avctx` as its parameter — confirm the exact prototype against
 * the full source. */
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    const int mb_xy = h->mb_xy;
    int i, header;
    unsigned slice_id;

    header = get_bits(&h->gb, 8);

    /* low bits must code slice type 1 or 2; bits 5-6 give the byte
     * length of the next-slice offset field and must be non-zero */
    if (((header & 0x9F) != 1 && (header & 0x9F) != 2) || (header & 0x60) == 0) {
        /* TODO: what? */
        av_log(avctx, AV_LOG_ERROR, "unsupported slice header (%02X)\n", header);
        return -1;
    } else {
        int length = header >> 5 & 3;

        /* NOTE(review): the start of the s->next_slice_index assignment
         * is elided in this extract (rendered line 795); only its
         * trailing operands are visible. */
            8 * show_bits(&h->gb, 8 * length) +
            8 * length;

        if (s->next_slice_index > h->gb.size_in_bits) {
            av_log(avctx, AV_LOG_ERROR, "slice after bitstream end\n");
            return -1;
        }

        h->gb.size_in_bits = s->next_slice_index - 8 * (length - 1);
        skip_bits(&h->gb, 8);

        /* undo the watermark obfuscation of the next 32 bits in place */
        if (s->watermark_key) {
            uint32_t header = AV_RL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1]);
            AV_WL32(&h->gb.buffer[(get_bits_count(&h->gb) >> 3) + 1],
                    header ^ s->watermark_key);
        }
        if (length > 0) {
            memmove((uint8_t *) &h->gb.buffer[get_bits_count(&h->gb) >> 3],
                    &h->gb.buffer[h->gb.size_in_bits >> 3], length - 1);
        }
        skip_bits_long(&h->gb, 0);
    }

    if ((slice_id = svq3_get_ue_golomb(&h->gb)) >= 3) {
        av_log(h->avctx, AV_LOG_ERROR, "illegal slice type %u \n", slice_id);
        return -1;
    }

    h->slice_type = golomb_to_pict_type[slice_id];

    if ((header & 0x9F) == 2) {
        /* the skip run is coded relative to the current MB address */
        i = (h->mb_num < 64) ? 6 : (1 + av_log2(h->mb_num - 1));
        h->mb_skip_run = get_bits(&h->gb, i) -
                         (h->mb_y * h->mb_width + h->mb_x);
    } else {
        skip_bits1(&h->gb);
        h->mb_skip_run = 0;
    }

    h->slice_num      = get_bits(&h->gb, 8);
    h->qscale         = get_bits(&h->gb, 5);
    s->adaptive_quant = get_bits1(&h->gb);

    /* unknown fields */
    skip_bits1(&h->gb);

    if (s->unknown_flag)
        skip_bits1(&h->gb);

    skip_bits1(&h->gb);
    skip_bits(&h->gb, 2);

    if (skip_1stop_8data_bits(&h->gb) < 0)
        return AVERROR_INVALIDDATA;

    /* reset intra predictors and invalidate motion vector references */
    if (h->mb_x > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - 1] + 3,
               -1, 4 * sizeof(int8_t));
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_x],
               -1, 8 * sizeof(int8_t) * h->mb_x);
    }
    if (h->mb_y > 0) {
        memset(h->intra4x4_pred_mode + h->mb2br_xy[mb_xy - h->mb_stride],
               -1, 8 * sizeof(int8_t) * (h->mb_width - h->mb_x));

        if (h->mb_x > 0)
            h->intra4x4_pred_mode[h->mb2br_xy[mb_xy - h->mb_stride - 1] + 3] = -1;
    }

    return 0;
}
868 
/* NOTE(review): the function signature line is elided in this extract
 * (rendered line 869). This is the decoder init callback; the body
 * reads `avctx` as its parameter — confirm the exact prototype against
 * the full source. It allocates the picture contexts, initializes the
 * H.264 helper state, then parses the optional "SEQH" block from the
 * container extradata (frame size, sub-pel flags, optional zlib-packed
 * watermark). */
{
    SVQ3Context *s = avctx->priv_data;
    H264Context *h = &s->h;
    int m;
    unsigned char *extradata;
    unsigned char *extradata_end;
    unsigned int size;
    int marker_found = 0;
    int ret;

    s->cur_pic  = av_mallocz(sizeof(*s->cur_pic));
    s->last_pic = av_mallocz(sizeof(*s->last_pic));
    s->next_pic = av_mallocz(sizeof(*s->next_pic));
    if (!s->next_pic || !s->last_pic || !s->cur_pic) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ff_h264_decode_init(avctx)) < 0)
        goto fail;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_tpeldsp_init(&s->tdsp);

    h->flags = avctx->flags;
    h->is_complex = 1;
    h->sps.chroma_format_idc = 1;    // SVQ3 is always 4:2:0
    /* NOTE(review): one line is elided here in this extract (rendered
     * line 897). */
    avctx->pix_fmt     = AV_PIX_FMT_YUVJ420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    h->chroma_qp[0] = h->chroma_qp[1] = 4;
    h->chroma_x_shift = h->chroma_y_shift = 1;

    /* defaults, possibly overridden by the SEQH block below */
    s->halfpel_flag  = 1;
    s->thirdpel_flag = 1;
    s->unknown_flag  = 0;

    /* prowl for the "SEQH" marker in the extradata */
    extradata     = (unsigned char *)avctx->extradata;
    extradata_end = avctx->extradata + avctx->extradata_size;
    if (extradata) {
        for (m = 0; m + 8 < avctx->extradata_size; m++) {
            if (!memcmp(extradata, "SEQH", 4)) {
                marker_found = 1;
                break;
            }
            extradata++;
        }
    }

    /* if a match was found, parse the extra data */
    if (marker_found) {
        GetBitContext gb;
        int frame_size_code;

        size = AV_RB32(&extradata[4]);
        if (size > extradata_end - extradata - 8) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }
        init_get_bits(&gb, extradata + 8, size * 8);

        /* 'frame size code' and optional 'width, height':
         * codes 0-6 select fixed sizes, 7 carries explicit 12-bit
         * width and height */
        frame_size_code = get_bits(&gb, 3);
        switch (frame_size_code) {
        case 0:
            avctx->width  = 160;
            avctx->height = 120;
            break;
        case 1:
            avctx->width  = 128;
            avctx->height = 96;
            break;
        case 2:
            avctx->width  = 176;
            avctx->height = 144;
            break;
        case 3:
            avctx->width  = 352;
            avctx->height = 288;
            break;
        case 4:
            avctx->width  = 704;
            avctx->height = 576;
            break;
        case 5:
            avctx->width  = 240;
            avctx->height = 180;
            break;
        case 6:
            avctx->width  = 320;
            avctx->height = 240;
            break;
        case 7:
            avctx->width  = get_bits(&gb, 12);
            avctx->height = get_bits(&gb, 12);
            break;
        }

        s->halfpel_flag  = get_bits1(&gb);
        s->thirdpel_flag = get_bits1(&gb);

        /* unknown fields */
        skip_bits1(&gb);
        skip_bits1(&gb);
        skip_bits1(&gb);
        skip_bits1(&gb);

        h->low_delay = get_bits1(&gb);

        /* unknown field */
        skip_bits1(&gb);

        if (skip_1stop_8data_bits(&gb) < 0) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        s->unknown_flag = get_bits1(&gb);
        avctx->has_b_frames = !h->low_delay;
        if (s->unknown_flag) {
#if CONFIG_ZLIB
            /* the watermark is a zlib-compressed logo; its checksum
             * yields the key used to de-obfuscate slice data */
            unsigned watermark_width  = svq3_get_ue_golomb(&gb);
            unsigned watermark_height = svq3_get_ue_golomb(&gb);
            int u1                    = svq3_get_ue_golomb(&gb);
            int u2                    = get_bits(&gb, 8);
            int u3                    = get_bits(&gb, 2);
            int u4                    = svq3_get_ue_golomb(&gb);
            unsigned long buf_len     = watermark_width *
                                        watermark_height * 4;
            int offset                = get_bits_count(&gb) + 7 >> 3;
            uint8_t *buf;

            /* overflow check for the buf_len multiplication above */
            if (watermark_height <= 0 ||
                (uint64_t)watermark_width * 4 > UINT_MAX / watermark_height) {
                ret = -1;
                goto fail;
            }

            buf = av_malloc(buf_len);
            if (!buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            av_log(avctx, AV_LOG_DEBUG, "watermark size: %ux%u\n",
                   watermark_width, watermark_height);
            av_log(avctx, AV_LOG_DEBUG,
                   "u1: %x u2: %x u3: %x compressed data size: %d offset: %d\n",
                   u1, u2, u3, u4, offset);
            if (uncompress(buf, &buf_len, extradata + 8 + offset,
                           size - offset) != Z_OK) {
                av_log(avctx, AV_LOG_ERROR,
                       "could not uncompress watermark logo\n");
                av_free(buf);
                ret = -1;
                goto fail;
            }
            s->watermark_key = ff_svq1_packet_checksum(buf, buf_len, 0);
            s->watermark_key = s->watermark_key << 16 | s->watermark_key;
            av_log(avctx, AV_LOG_DEBUG,
                   "watermark key %#"PRIx32"\n", s->watermark_key);
            av_free(buf);
#else
            av_log(avctx, AV_LOG_ERROR,
                   "this svq3 file contains watermark which need zlib support compiled in\n");
            ret = -1;
            goto fail;
#endif
        }
    }

    /* derive the macroblock grid geometry from the frame size */
    h->width  = avctx->width;
    h->height = avctx->height;
    h->mb_width  = (h->width + 15) / 16;
    h->mb_height = (h->height + 15) / 16;
    h->mb_stride = h->mb_width + 1;
    h->mb_num    = h->mb_width * h->mb_height;
    h->b_stride  = 4 * h->mb_width;
    s->h_edge_pos = h->mb_width * 16;
    s->v_edge_pos = h->mb_height * 16;

    if ((ret = ff_h264_alloc_tables(h)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "svq3 memory allocation failed\n");
        goto fail;
    }

    return 0;
fail:
    svq3_decode_end(avctx);
    return ret;
}
1062 
1063 static void free_picture(AVCodecContext *avctx, H264Picture *pic)
1064 {
1065  int i;
1066  for (i = 0; i < 2; i++) {
1067  av_buffer_unref(&pic->motion_val_buf[i]);
1068  av_buffer_unref(&pic->ref_index_buf[i]);
1069  }
1071 
1072  av_frame_unref(&pic->f);
1073 }
1074 
1075 static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
1076 {
1077  SVQ3Context *s = avctx->priv_data;
1078  H264Context *h = &s->h;
1079  const int big_mb_num = h->mb_stride * (h->mb_height + 1) + 1;
1080  const int mb_array_size = h->mb_stride * h->mb_height;
1081  const int b4_stride = h->mb_width * 4 + 1;
1082  const int b4_array_size = b4_stride * h->mb_height * 4;
1083  int ret;
1084 
1085  if (!pic->motion_val_buf[0]) {
1086  int i;
1087 
1088  pic->mb_type_buf = av_buffer_allocz((big_mb_num + h->mb_stride) * sizeof(uint32_t));
1089  if (!pic->mb_type_buf)
1090  return AVERROR(ENOMEM);
1091  pic->mb_type = (uint32_t*)pic->mb_type_buf->data + 2 * h->mb_stride + 1;
1092 
1093  for (i = 0; i < 2; i++) {
1094  pic->motion_val_buf[i] = av_buffer_allocz(2 * (b4_array_size + 4) * sizeof(int16_t));
1095  pic->ref_index_buf[i] = av_buffer_allocz(4 * mb_array_size);
1096  if (!pic->motion_val_buf[i] || !pic->ref_index_buf[i]) {
1097  ret = AVERROR(ENOMEM);
1098  goto fail;
1099  }
1100 
1101  pic->motion_val[i] = (int16_t (*)[2])pic->motion_val_buf[i]->data + 4;
1102  pic->ref_index[i] = pic->ref_index_buf[i]->data;
1103  }
1104  }
1105  pic->reference = !(h->pict_type == AV_PICTURE_TYPE_B);
1106 
1107  ret = ff_get_buffer(avctx, &pic->f,
1108  pic->reference ? AV_GET_BUFFER_FLAG_REF : 0);
1109  if (ret < 0)
1110  goto fail;
1111 
1112  if (!h->edge_emu_buffer) {
1113  h->edge_emu_buffer = av_mallocz_array(pic->f.linesize[0], 17);
1114  if (!h->edge_emu_buffer)
1115  return AVERROR(ENOMEM);
1116  }
1117 
1118  h->linesize = pic->f.linesize[0];
1119  h->uvlinesize = pic->f.linesize[1];
1120 
1121  return 0;
1122 fail:
1123  free_picture(avctx, pic);
1124  return ret;
1125 }
1126 
1127 static int svq3_decode_frame(AVCodecContext *avctx, void *data,
1128  int *got_frame, AVPacket *avpkt)
1129 {
1130  SVQ3Context *s = avctx->priv_data;
1131  H264Context *h = &s->h;
1132  int buf_size = avpkt->size;
1133  int left;
1134  uint8_t *buf;
1135  int ret, m, i;
1136 
1137  /* special case for last picture */
1138  if (buf_size == 0) {
1139  if (s->next_pic->f.data[0] && !h->low_delay && !s->last_frame_output) {
1140  ret = av_frame_ref(data, &s->next_pic->f);
1141  if (ret < 0)
1142  return ret;
1143  s->last_frame_output = 1;
1144  *got_frame = 1;
1145  }
1146  return 0;
1147  }
1148 
1149  h->mb_x = h->mb_y = h->mb_xy = 0;
1150 
1151  if (s->watermark_key) {
1152  av_fast_padded_malloc(&s->buf, &s->buf_size, buf_size);
1153  if (!s->buf)
1154  return AVERROR(ENOMEM);
1155  memcpy(s->buf, avpkt->data, buf_size);
1156  buf = s->buf;
1157  } else {
1158  buf = avpkt->data;
1159  }
1160 
1161  init_get_bits(&h->gb, buf, 8 * buf_size);
1162 
1163  if (svq3_decode_slice_header(avctx))
1164  return -1;
1165 
1166  h->pict_type = h->slice_type;
1167 
1168  if (h->pict_type != AV_PICTURE_TYPE_B)
1169  FFSWAP(H264Picture*, s->next_pic, s->last_pic);
1170 
1171  av_frame_unref(&s->cur_pic->f);
1172 
1173  /* for skipping the frame */
1174  s->cur_pic->f.pict_type = h->pict_type;
1176 
1177  ret = get_buffer(avctx, s->cur_pic);
1178  if (ret < 0)
1179  return ret;
1180 
1181  h->cur_pic_ptr = s->cur_pic;
1182  av_frame_unref(&h->cur_pic.f);
1183  memcpy(&h->cur_pic.tf, &s->cur_pic->tf, sizeof(h->cur_pic) - offsetof(H264Picture, tf));
1184  ret = av_frame_ref(&h->cur_pic.f, &s->cur_pic->f);
1185  if (ret < 0)
1186  return ret;
1187 
1188  for (i = 0; i < 16; i++) {
1189  h->block_offset[i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1190  h->block_offset[48 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->linesize * ((scan8[i] - scan8[0]) >> 3);
1191  }
1192  for (i = 0; i < 16; i++) {
1193  h->block_offset[16 + i] =
1194  h->block_offset[32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 4 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1195  h->block_offset[48 + 16 + i] =
1196  h->block_offset[48 + 32 + i] = (4 * ((scan8[i] - scan8[0]) & 7)) + 8 * h->uvlinesize * ((scan8[i] - scan8[0]) >> 3);
1197  }
1198 
1199  if (h->pict_type != AV_PICTURE_TYPE_I) {
1200  if (!s->last_pic->f.data[0]) {
1201  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1202  av_frame_unref(&s->last_pic->f);
1203  ret = get_buffer(avctx, s->last_pic);
1204  if (ret < 0)
1205  return ret;
1206  memset(s->last_pic->f.data[0], 0, avctx->height * s->last_pic->f.linesize[0]);
1207  memset(s->last_pic->f.data[1], 0x80, (avctx->height / 2) *
1208  s->last_pic->f.linesize[1]);
1209  memset(s->last_pic->f.data[2], 0x80, (avctx->height / 2) *
1210  s->last_pic->f.linesize[2]);
1211  }
1212 
1213  if (h->pict_type == AV_PICTURE_TYPE_B && !s->next_pic->f.data[0]) {
1214  av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
1215  av_frame_unref(&s->next_pic->f);
1216  ret = get_buffer(avctx, s->next_pic);
1217  if (ret < 0)
1218  return ret;
1219  memset(s->next_pic->f.data[0], 0, avctx->height * s->next_pic->f.linesize[0]);
1220  memset(s->next_pic->f.data[1], 0x80, (avctx->height / 2) *
1221  s->next_pic->f.linesize[1]);
1222  memset(s->next_pic->f.data[2], 0x80, (avctx->height / 2) *
1223  s->next_pic->f.linesize[2]);
1224  }
1225  }
1226 
1227  if (avctx->debug & FF_DEBUG_PICT_INFO)
1229  "%c hpel:%d, tpel:%d aqp:%d qp:%d, slice_num:%02X\n",
1231  s->halfpel_flag, s->thirdpel_flag,
1232  s->adaptive_quant, h->qscale, h->slice_num);
1233 
1234  if (avctx->skip_frame >= AVDISCARD_NONREF && h->pict_type == AV_PICTURE_TYPE_B ||
1236  avctx->skip_frame >= AVDISCARD_ALL)
1237  return 0;
1238 
1239  if (s->next_p_frame_damaged) {
1240  if (h->pict_type == AV_PICTURE_TYPE_B)
1241  return 0;
1242  else
1243  s->next_p_frame_damaged = 0;
1244  }
1245 
1246  if (h->pict_type == AV_PICTURE_TYPE_B) {
1248 
1249  if (h->frame_num_offset < 0)
1250  h->frame_num_offset += 256;
1251  if (h->frame_num_offset == 0 ||
1253  av_log(h->avctx, AV_LOG_ERROR, "error in B-frame picture id\n");
1254  return -1;
1255  }
1256  } else {
1257  h->prev_frame_num = h->frame_num;
1258  h->frame_num = h->slice_num;
1260 
1261  if (h->prev_frame_num_offset < 0)
1262  h->prev_frame_num_offset += 256;
1263  }
1264 
1265  for (m = 0; m < 2; m++) {
1266  int i;
1267  for (i = 0; i < 4; i++) {
1268  int j;
1269  for (j = -1; j < 4; j++)
1270  h->ref_cache[m][scan8[0] + 8 * i + j] = 1;
1271  if (i < 3)
1272  h->ref_cache[m][scan8[0] + 8 * i + j] = PART_NOT_AVAILABLE;
1273  }
1274  }
1275 
1276  for (h->mb_y = 0; h->mb_y < h->mb_height; h->mb_y++) {
1277  for (h->mb_x = 0; h->mb_x < h->mb_width; h->mb_x++) {
1278  unsigned mb_type;
1279  h->mb_xy = h->mb_x + h->mb_y * h->mb_stride;
1280 
1281  if ((get_bits_count(&h->gb) + 7) >= h->gb.size_in_bits &&
1282  ((get_bits_count(&h->gb) & 7) == 0 ||
1283  show_bits(&h->gb, -get_bits_count(&h->gb) & 7) == 0)) {
1284  skip_bits(&h->gb, s->next_slice_index - get_bits_count(&h->gb));
1285  h->gb.size_in_bits = 8 * buf_size;
1286 
1287  if (svq3_decode_slice_header(avctx))
1288  return -1;
1289 
1290  /* TODO: support s->mb_skip_run */
1291  }
1292 
1293  mb_type = svq3_get_ue_golomb(&h->gb);
1294 
1295  if (h->pict_type == AV_PICTURE_TYPE_I)
1296  mb_type += 8;
1297  else if (h->pict_type == AV_PICTURE_TYPE_B && mb_type >= 4)
1298  mb_type += 4;
1299  if (mb_type > 33 || svq3_decode_mb(s, mb_type)) {
1301  "error while decoding MB %d %d\n", h->mb_x, h->mb_y);
1302  return -1;
1303  }
1304 
1305  if (mb_type != 0 || h->cbp)
1307 
1308  if (h->pict_type != AV_PICTURE_TYPE_B && !h->low_delay)
1309  h->cur_pic.mb_type[h->mb_x + h->mb_y * h->mb_stride] =
1310  (h->pict_type == AV_PICTURE_TYPE_P && mb_type < 8) ? (mb_type - 1) : -1;
1311  }
1312 
1313  ff_draw_horiz_band(avctx, &s->cur_pic->f,
1314  s->last_pic->f.data[0] ? &s->last_pic->f : NULL,
1315  16 * h->mb_y, 16, h->picture_structure, 0,
1316  h->low_delay);
1317  }
1318 
1319  left = buf_size*8 - get_bits_count(&h->gb);
1320 
1321  if (h->mb_y != h->mb_height || h->mb_x != h->mb_width) {
1322  av_log(avctx, AV_LOG_INFO, "frame num %d incomplete pic x %d y %d left %d\n", avctx->frame_number, h->mb_y, h->mb_x, left);
1323  //av_hex_dump(stderr, buf+buf_size-8, 8);
1324  }
1325 
1326  if (left < 0) {
1327  av_log(avctx, AV_LOG_ERROR, "frame num %d left %d\n", avctx->frame_number, left);
1328  return -1;
1329  }
1330 
1331  if (h->pict_type == AV_PICTURE_TYPE_B || h->low_delay)
1332  ret = av_frame_ref(data, &s->cur_pic->f);
1333  else if (s->last_pic->f.data[0])
1334  ret = av_frame_ref(data, &s->last_pic->f);
1335  if (ret < 0)
1336  return ret;
1337 
1338  /* Do not output the last pic after seeking. */
1339  if (s->last_pic->f.data[0] || h->low_delay)
1340  *got_frame = 1;
1341 
1342  if (h->pict_type != AV_PICTURE_TYPE_B) {
1343  FFSWAP(H264Picture*, s->cur_pic, s->next_pic);
1344  } else {
1345  av_frame_unref(&s->cur_pic->f);
1346  }
1347 
1348  return buf_size;
1349 }
1350 
1352 {
1353  SVQ3Context *s = avctx->priv_data;
1354  H264Context *h = &s->h;
1355 
1356  free_picture(avctx, s->cur_pic);
1357  free_picture(avctx, s->next_pic);
1358  free_picture(avctx, s->last_pic);
1359  av_freep(&s->cur_pic);
1360  av_freep(&s->next_pic);
1361  av_freep(&s->last_pic);
1362 
1363  av_frame_unref(&h->cur_pic.f);
1364 
1366 
1367  av_freep(&s->buf);
1368  s->buf_size = 0;
1370 
1371  return 0;
1372 }
1373 
1375  .name = "svq3",
1376  .long_name = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 3 / Sorenson Video 3 / SVQ3"),
1377  .type = AVMEDIA_TYPE_VIDEO,
1378  .id = AV_CODEC_ID_SVQ3,
1379  .priv_data_size = sizeof(SVQ3Context),
1381  .close = svq3_decode_end,
1383  .capabilities = CODEC_CAP_DRAW_HORIZ_BAND |
1384  CODEC_CAP_DR1 |
1386  .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUVJ420P,
1387  AV_PIX_FMT_NONE},
1388 };
int chroma_format_idc
Definition: h264.h:177
#define MB_TYPE_INTRA16x16
Definition: avcodec.h:893
uint8_t pred_mode
Definition: h264data.h:75
#define NULL
Definition: coverity.c:32
#define MB_TYPE_SKIP
Definition: avcodec.h:903
discard all frames except keyframes
Definition: avcodec.h:666
uint8_t * edge_emu_buffer
Definition: h264.h:756
const char * s
Definition: avisynth_c.h:669
unsigned int top_samples_available
Definition: h264.h:397
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
GetBitContext gb
Definition: h264.h:346
int low_delay
Definition: h264.h:367
int mb_num
Definition: h264.h:543
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:124
ptrdiff_t const GLvoid * data
Definition: opengl_enc.c:101
ptrdiff_t uvlinesize
Definition: h264.h:361
int cbp
Definition: h264.h:510
HpelDSPContext hdsp
Definition: svq3.c:74
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
static unsigned int get_bits(GetBitContext *s, int n)
Read 1-25 bits.
Definition: get_bits.h:260
uint16_t ff_svq1_packet_checksum(const uint8_t *data, const int length, int value)
Definition: svq13.c:60
static int svq3_decode_block(GetBitContext *gb, int16_t *block, int index, const int type)
Definition: svq3.c:242
else temp
Definition: vf_mcdeint.c:257
static void skip_bits_long(GetBitContext *s, int n)
Definition: get_bits.h:217
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
av_cold int ff_h264_decode_init(AVCodecContext *avctx)
Definition: h264.c:628
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1958
int mb_y
Definition: h264.h:536
AVBufferRef * mb_type_buf
Definition: h264.h:303
int size
Definition: avcodec.h:1161
#define MB_TYPE_INTRA4x4
Definition: avcodec.h:892
int chroma_x_shift
Definition: h264.h:362
const uint8_t * buffer
Definition: get_bits.h:55
static unsigned svq3_get_ue_golomb(GetBitContext *gb)
Definition: golomb.h:115
#define INVALID_VLC
Definition: golomb.h:38
int16_t(*[2] motion_val)[2]
Definition: h264.h:301
int flags
Definition: h264.h:370
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1442
int mb_height
Definition: h264.h:541
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:139
int v_edge_pos
Definition: svq3.c:89
int16_t mv_cache[2][5 *8][2]
Motion vector cache.
Definition: h264.h:413
H264Context.
Definition: h264.h:339
discard all
Definition: avcodec.h:667
uint8_t run
Definition: svq3.c:149
struct AVFrame f
Definition: h264.h:293
#define FULLPEL_MODE
Definition: svq3.c:93
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
Definition: mpegutils.c:30
AVCodec.
Definition: avcodec.h:3173
int picture_structure
Definition: h264.h:457
#define AV_WN32A(p, v)
Definition: intreadwrite.h:538
#define AV_COPY32(d, s)
Definition: intreadwrite.h:586
static av_always_inline uint32_t pack16to32(int a, int b)
Definition: h264.h:953
int mb_skip_run
Definition: h264.h:540
Macro definitions for various function/variable attributes.
#define tf
Definition: regdef.h:73
static int svq3_mc_dir(SVQ3Context *s, int size, int mode, int dir, int avg)
Definition: svq3.c:374
enum AVDiscard skip_frame
Skip decoding for selected frames.
Definition: avcodec.h:2939
static const uint8_t golomb_to_pict_type[5]
Definition: h264data.h:37
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
int thirdpel_flag
Definition: svq3.c:80
if()
Definition: avfilter.c:975
static const uint8_t luma_dc_zigzag_scan[16]
Definition: svq3.c:114
uint8_t
#define av_cold
Definition: attributes.h:74
int prev_frame_num_offset
for POC type 2
Definition: h264.h:586
#define av_malloc(s)
#define DC_PRED8x8
Definition: h264pred.h:68
mode
Definition: f_perms.c:27
#define AV_RB32
Definition: intreadwrite.h:130
H264Picture * last_pic
Definition: svq3.c:78
int mb_xy
Definition: h264.h:544
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:278
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:1353
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:787
int height
Definition: h264.h:360
int mb_x
Definition: h264.h:536
static const IMbInfo i_mb_type_info[26]
Definition: h264data.h:79
uint8_t * data
Definition: avcodec.h:1160
thirdpel DSP context
Definition: tpeldsp.h:42
static int get_bits_count(const GetBitContext *s)
Definition: get_bits.h:212
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:84
int chroma_y_shift
Definition: h264.h:362
thirdpel DSP functions
static void fill_rectangle(SDL_Surface *screen, int x, int y, int w, int h, int color, int update)
Definition: ffplay.c:779
ptrdiff_t size
Definition: opengl_enc.c:101
static const uint8_t header[24]
Definition: sdr2.c:67
#define av_log(a,...)
unsigned m
Definition: audioconvert.c:187
int width
Definition: h264.h:360
static int svq3_decode_mb(SVQ3Context *s, unsigned int mb_type)
Definition: svq3.c:488
H.264 / AVC / MPEG4 part10 codec.
#define U(x)
Definition: vp56_arith.h:37
int frame_num
Definition: h264.h:582
static void free_picture(AVCodecContext *avctx, H264Picture *pic)
Definition: svq3.c:1063
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:192
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:175
int next_slice_index
Definition: svq3.c:82
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1531
int16_t mb_luma_dc[3][16 *2]
Definition: h264.h:499
#define HALFPEL_MODE
Definition: svq3.c:94
#define CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:822
#define AVERROR(e)
Definition: error.h:43
av_cold void ff_tpeldsp_init(TpelDSPContext *c)
Definition: tpeldsp.c:312
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:180
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:196
int flags
CODEC_FLAG_*.
Definition: avcodec.h:1333
ThreadFrame tf
Definition: h264.h:295
GLsizei GLsizei * length
Definition: opengl_enc.c:115
const char * name
Name of the codec implementation.
Definition: avcodec.h:3180
#define IS_SKIP(a)
Definition: mpegutils.h:77
#define PREDICT_MODE
Definition: svq3.c:96
static const uint8_t offset[127][2]
Definition: vf_spp.c:92
Libavcodec external API header.
Sorenson Vector Quantizer #1 (SVQ1) video codec.
static const uint8_t scan8[16 *3+3]
Definition: h264.h:937
static av_always_inline void pred_motion(H264Context *const h, int n, int part_width, int list, int ref, int *const mx, int *const my)
Get the predicted MV.
Definition: h264_mvpred.h:94
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
Definition: hpeldsp.c:338
static int svq3_get_se_golomb(GetBitContext *gb)
Definition: golomb.h:224
int chroma_pred_mode
Definition: h264.h:377
void ff_svq3_add_idct_c(uint8_t *dst, int16_t *block, int stride, int qp, int dc)
Definition: svq3.c:202
useful rectangle filling function
unsigned int left_samples_available
Definition: h264.h:399
tpel_mc_func avg_tpel_pixels_tab[11]
Definition: tpeldsp.h:54
Half-pel DSP context.
Definition: hpeldsp.h:45
AVBufferRef * motion_val_buf[2]
Definition: h264.h:300
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:234
int frame_num_offset
for POC type 2
Definition: h264.h:585
uint32_t * mb2br_xy
Definition: h264.h:430
#define CODEC_CAP_DRAW_HORIZ_BAND
Decoder can use draw_horiz_band callback.
Definition: avcodec.h:781
ptrdiff_t linesize
Definition: h264.h:361
#define FFMIN(a, b)
Definition: common.h:66
float y
int reference
Definition: h264.h:326
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_...
Definition: pixfmt.h:80
H264Context h
Definition: svq3.c:73
ret
Definition: avfilter.c:974
int width
picture width / height.
Definition: avcodec.h:1412
void ff_svq3_luma_dc_dequant_idct_c(int16_t *output, int16_t *input, int qp)
Definition: svq3.c:167
uint32_t * mb_type
Definition: h264.h:304
SPS sps
current sps
Definition: h264.h:437
int size_in_bits
Definition: get_bits.h:57
int32_t
static unsigned int show_bits(GetBitContext *s, int n)
Show 1-25 bits.
Definition: get_bits.h:287
static av_cold int svq3_decode_init(AVCodecContext *avctx)
Definition: svq3.c:869
tpel_mc_func put_tpel_pixels_tab[11]
Thirdpel motion compensation with rounding (a + b + 1) >> 1.
Definition: tpeldsp.h:53
int8_t intra4x4_pred_mode_cache[5 *8]
Definition: h264.h:393
#define AV_RL32
Definition: intreadwrite.h:146
float u
unsigned int topright_samples_available
Definition: h264.h:398
H264Picture * cur_pic
Definition: svq3.c:76
int slice_type
Definition: h264.h:449
static const uint8_t golomb_to_intra4x4_cbp[48]
Definition: h264data.h:42
int last_frame_output
Definition: svq3.c:90
#define PART_NOT_AVAILABLE
Definition: h264.h:416
int next_p_frame_damaged
Definition: svq3.c:87
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:515
#define IS_INTRA16x16(a)
Definition: mpegutils.h:72
static const int8_t mv[256][2]
Definition: 4xm.c:77
VideoDSPContext vdsp
Definition: h264.h:342
Half-pel DSP functions.
AVCodec ff_svq3_decoder
Definition: svq3.c:1374
int mb_stride
Definition: h264.h:542
#define AV_LOG_INFO
Standard information.
Definition: log.h:186
AVCodecContext * avctx
Definition: h264.h:341
AVS_Value src
Definition: avisynth_c.h:524
H264 / AVC / MPEG4 part10 codec data table
static const uint8_t zigzag_scan[16+1]
Definition: h264data.h:54
int prev_frame_num
frame_num of the last pic for POC type 1/2
Definition: h264.h:587
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:191
int debug
debug
Definition: avcodec.h:2563
main external API structure.
Definition: avcodec.h:1239
int ff_h264_check_intra4x4_pred_mode(H264Context *h)
Check if the top & left blocks are available if needed and change the dc mode so it only uses the ava...
Definition: h264.c:145
uint8_t * data
The data buffer.
Definition: buffer.h:89
int ff_h264_alloc_tables(H264Context *h)
Allocate tables.
Definition: h264.c:427
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: utils.c:1030
int16_t mb[16 *48 *2]
as a dct coefficient is int32_t in high depth, we need to reserve twice the space.
Definition: h264.h:498
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
void * buf
Definition: avisynth_c.h:595
GLint GLenum type
Definition: opengl_enc.c:105
int extradata_size
Definition: avcodec.h:1354
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:82
static unsigned int get_bits1(GetBitContext *s)
Definition: get_bits.h:304
BYTE int const BYTE int int int height
Definition: avisynth_c.h:714
static void skip_bits1(GetBitContext *s)
Definition: get_bits.h:329
static void skip_bits(GetBitContext *s, int n)
Definition: get_bits.h:297
int index
Definition: gxfenc.c:89
static const uint8_t chroma_dc_scan[4]
Definition: h264data.h:61
int8_t * ref_index[2]
Definition: h264.h:310
static void svq3_mc_dir_part(SVQ3Context *s, int x, int y, int width, int height, int mx, int my, int dxy, int thirdpel, int dir, int avg)
Definition: svq3.c:300
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:410
#define MB_TYPE_16x16
Definition: avcodec.h:895
H264Picture * cur_pic_ptr
Definition: h264.h:350
static int svq3_decode_end(AVCodecContext *avctx)
Definition: svq3.c:1351
static const uint8_t svq3_pred_0[25][2]
Definition: svq3.c:121
#define IS_INTER(a)
Definition: mpegutils.h:75
int unknown_flag
Definition: svq3.c:81
uint8_t * buf
Definition: svq3.c:84
int block_offset[2 *(16 *3)]
block_offset[ 0..23] for frame macroblocks block_offset[24..47] for field macroblocks ...
Definition: h264.h:427
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:377
av_cold void ff_h264_free_context(H264Context *h)
Free any data that may have been allocated in the H264 context like SPS, PPS etc. ...
Definition: h264.c:1920
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:174
uint8_t level
Definition: svq3.c:150
#define FF_DEBUG_PICT_INFO
Definition: avcodec.h:2564
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(constuint8_t *) pi-0x80)*(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(constint16_t *) pi >>8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t,*(constint16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(constint32_t *) pi >>24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t,*(constint32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(constfloat *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(constfloat *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(constfloat *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(constdouble *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(constdouble *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(constdouble *) pi *(1U<< 31))))#defineSET_CONV_FUNC_GROUP(ofmt, ifmt) staticvoidset_generic_function(AudioConvert *ac){}voidff_audio_convert_free(AudioConvert **ac){if(!*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);}AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enumAVSampleFormatout_fmt, enumAVSampleFormatin_fmt, intchannels, intsample_rate, 
intapply_map){AudioConvert *ac;intin_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) returnNULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method!=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt)>2){ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc){av_free(ac);returnNULL;}returnac;}in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar){ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar?ac->channels:1;}elseif(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;elseac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);returnac;}intff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in){intuse_generic=1;intlen=in->nb_samples;intp;if(ac->dc){av_dlog(ac->avr,"%dsamples-audio_convert:%sto%s(dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));returnff_convert_dither(ac-> dc
Definition: vp9.h:48
#define AV_ZERO128(d)
Definition: intreadwrite.h:622
static int decode(AVCodecContext *avctx, void *data, int *got_sub, AVPacket *avpkt)
Definition: ccaption_dec.c:520
discard all non reference
Definition: avcodec.h:663
int is_complex
Definition: h264.h:546
GLint GLenum GLboolean GLsizei stride
Definition: opengl_enc.c:105
int qscale
Definition: h264.h:364
uint8_t cbp
Definition: h264data.h:76
common internal api header.
H264Picture * next_pic
Definition: svq3.c:77
#define CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:736
int h_edge_pos
Definition: svq3.c:88
static int get_buffer(AVCodecContext *avctx, H264Picture *pic)
Definition: svq3.c:1075
H.264 / AVC / MPEG4 part10 motion vector predicion.
Bi-dir predicted.
Definition: avutil.h:269
#define stride
int chroma_qp[2]
Definition: h264.h:355
static const uint8_t golomb_to_inter_cbp[48]
Definition: h264data.h:48
static av_always_inline void write_back_intra_pred_mode(H264Context *h)
Definition: h264.h:997
int intra16x16_pred_mode
Definition: h264.h:378
#define IS_INTRA(x, y)
static const uint32_t svq3_dequant_coeff[32]
Definition: svq3.c:158
void * priv_data
Definition: avcodec.h:1281
#define THIRDPEL_MODE
Definition: svq3.c:95
#define PICT_FRAME
Definition: mpegutils.h:35
#define IS_INTRA4x4(a)
Definition: mpegutils.h:71
#define av_free(p)
uint8_t non_zero_count_cache[15 *8]
non zero coeff count cache.
Definition: h264.h:406
#define av_log2
Definition: intmath.h:105
static int svq3_decode_slice_header(AVCodecContext *avctx)
Definition: svq3.c:778
H264Picture cur_pic
Definition: h264.h:351
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:229
#define AV_ZERO32(d)
Definition: intreadwrite.h:614
int mb_width
Definition: h264.h:541
enum AVPictureType pict_type
Definition: h264.h:649
TpelDSPContext tdsp
Definition: svq3.c:75
static const uint8_t svq3_scan[16]
Definition: svq3.c:107
AVBufferRef * ref_index_buf[2]
Definition: h264.h:309
static void * av_mallocz_array(size_t nmemb, size_t size)
Definition: mem.h:228
static const int8_t svq3_pred_1[6][6][5]
Definition: svq3.c:133
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2014
#define av_freep(p)
uint32_t watermark_key
Definition: svq3.c:83
static int skip_1stop_8data_bits(GetBitContext *gb)
Definition: get_bits.h:593
int8_t * intra4x4_pred_mode
Definition: h264.h:394
#define FFSWAP(type, a, b)
Definition: common.h:69
int buf_size
Definition: svq3.c:85
exp golomb vlc stuff
int slice_num
Definition: h264.h:447
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
This structure stores compressed data.
Definition: avcodec.h:1137
static int svq3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: svq3.c:1127
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:967
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:250
for(j=16;j >0;--j)
void ff_h264_hl_decode_mb(H264Context *h)
Definition: h264_mb.c:809
int b_stride
Definition: h264.h:431
Predicted.
Definition: avutil.h:268
int halfpel_flag
Definition: svq3.c:79
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
static const struct @81 svq3_dct_tables[2][16]
int adaptive_quant
Definition: svq3.c:86
int8_t ref_cache[2][5 *8]
Definition: h264.h:414
static int width
static int16_t block[64]
Definition: dct-test.c:110