/* from jpeg_table_header(): DQT segment with one or two quantization matrices */
                              uint16_t luma_intra_matrix[64],
                              uint16_t chroma_intra_matrix[64],
    ...
    int matrix_count = 1 + !!memcmp(luma_intra_matrix, chroma_intra_matrix,
                                    sizeof(luma_intra_matrix[0]) * 64);
    ...
    put_bits(p, 16, 2 + matrix_count * (1 + 64));
    ...
        put_bits(p, 8, luma_intra_matrix[j]);
    ...
    if (matrix_count > 1) {
        ...
            put_bits(p, 8, chroma_intra_matrix[j]);
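The 16-bit length covers the two length bytes themselves plus, for each matrix, one precision/table-id byte and 64 eight-bit quantizer values: 2 + 1*(1 + 64) = 67 bytes when luma and chroma share a matrix, 2 + 2*(1 + 64) = 132 bytes when they differ. A minimal sketch of the per-table loop, assuming 8-bit quantizer precision and a hypothetical zigzag[] scan-order table:

#include <stdint.h>
#include "put_bits.h"   /* PutBitContext, put_bits() */

/* Sketch: emit one quantization table of a DQT segment.
 * 'zigzag' is a hypothetical 64-entry scan-order permutation. */
static void write_quant_table(PutBitContext *p, int table_id,
                              const uint16_t matrix[64],
                              const uint8_t zigzag[64])
{
    put_bits(p, 4, 0);        /* precision: 0 = 8-bit quantizer values */
    put_bits(p, 4, table_id); /* table 0 = luma, table 1 = chroma */
    for (int i = 0; i < 64; i++)
        put_bits(p, 8, matrix[zigzag[i]]);
}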
/* from jpeg_put_comments(): length of the "CS=ITU601" comment segment */
    size = strlen("CS=ITU601") + 3;
/* from ff_mjpeg_init_hvsample(): pick per-component sampling factors */
    int chroma_h_shift, chroma_v_shift;
    ...
        vsample[0] = hsample[0] =
        vsample[1] = hsample[1] =
        vsample[2] = hsample[2] = 1;
    ...
        vsample[0] = vsample[1] = vsample[2] = 2;
        hsample[0] = hsample[1] = hsample[2] = 1;
    ...
        vsample[1] = 2 >> chroma_v_shift;
        vsample[2] = 2 >> chroma_v_shift;
        ...
        hsample[1] = 2 >> chroma_h_shift;
        hsample[2] = 2 >> chroma_h_shift;
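The shifts come from av_pix_fmt_get_chroma_sub_sample() (listed below); 2 >> shift maps a log2 chroma subsampling shift to a JPEG sampling factor. A sketch of that mapping for the default branch, with a hypothetical helper name:

#include "libavutil/pixdesc.h"   /* av_pix_fmt_get_chroma_sub_sample() */

/* Sketch (hypothetical helper): derive SOF sampling factors from the
 * pixel format's chroma shifts, following the 2 >> shift pattern above. */
static void sampling_factors(enum AVPixelFormat fmt,
                             int hsample[3], int vsample[3])
{
    int h_shift, v_shift;
    av_pix_fmt_get_chroma_sub_sample(fmt, &h_shift, &v_shift);
    hsample[0] = vsample[0] = 2;              /* luma */
    hsample[1] = hsample[2] = 2 >> h_shift;   /* Cb, Cr */
    vsample[1] = vsample[2] = 2 >> v_shift;
}
/* AV_PIX_FMT_YUVJ420P has shifts 1/1, so hsample = vsample = {2, 1, 1};
 * 4:2:2 input gives hsample = {2, 1, 1} and vsample = {2, 2, 2}. */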
/* from ff_mjpeg_encode_picture_header() */
                                    uint16_t luma_intra_matrix[64],
                                    uint16_t chroma_intra_matrix[64])
    ...
    int hsample[3], vsample[3];
    ...
    int chroma_matrix = !!memcmp(luma_intra_matrix, chroma_intra_matrix,
                                 sizeof(luma_intra_matrix[0]) * 64);
    ...
    jpeg_table_header(avctx, pb, intra_scantable, luma_intra_matrix,
                      chroma_intra_matrix, hsample);
    ...
    put_bits(pb, 8, lossless ? 0 : chroma_matrix);
    ...
    put_bits(pb, 8, lossless ? 0 : chroma_matrix);
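The two identical put_bits() calls are the quantization-table selectors for the Cb and Cr components of the frame header: table 0 always serves luma, and chroma points at table 1 only when a distinct chroma matrix was written (and never in lossless mode, which skips quantization). A sketch of the per-component frame-header entries, as a hypothetical helper:

#include "put_bits.h"   /* PutBitContext, put_bits() */

/* Sketch (hypothetical helper): the per-component entries of the frame
 * header.  Each component carries an id, packed h/v sampling factors and
 * a quantization-table selector; only chroma may point at table 1. */
static void put_sof_components(PutBitContext *pb, const int hsample[3],
                               const int vsample[3], int lossless,
                               int chroma_matrix)
{
    for (int i = 0; i < 3; i++) {
        put_bits(pb, 8, i + 1);       /* component id: 1 = Y, 2 = Cb, 3 = Cr */
        put_bits(pb, 4, hsample[i]);  /* horizontal sampling factor */
        put_bits(pb, 4, vsample[i]);  /* vertical sampling factor */
        put_bits(pb, 8, i ? (lossless ? 0 : chroma_matrix) : 0); /* quant table */
    }
}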
/* from ff_mjpeg_escape_FF(): count the 0xFF bytes that will need stuffing */
    int align = (-(size_t)(buf)) & 3;
    ...
    for (i = 0; i < size && i < align; i++) {
        if (buf[i] == 0xFF) ff_count++;
    }
    for (; i < size - 15; i += 16) {
        ...
        v    = *(uint32_t*)(&buf[i]);
        acc  = (((v & (v >> 4)) & 0x0F0F0F0F) + 0x01010101) & 0x10101010;
        v    = *(uint32_t*)(&buf[i + 4]);
        acc += (((v & (v >> 4)) & 0x0F0F0F0F) + 0x01010101) & 0x10101010;
        v    = *(uint32_t*)(&buf[i + 8]);
        acc += (((v & (v >> 4)) & 0x0F0F0F0F) + 0x01010101) & 0x10101010;
        v    = *(uint32_t*)(&buf[i + 12]);
        acc += (((v & (v >> 4)) & 0x0F0F0F0F) + 0x01010101) & 0x10101010;
        ...
    }
    for (; i < size; i++)
        if (buf[i] == 0xFF) ff_count++;
    ...
    if (ff_count == 0)
        return;
    ...
    for (i = size - 1; ff_count; i--) {
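In JPEG entropy-coded data every 0xFF byte must be followed by a stuffed 0x00 so decoders cannot mistake it for a marker; ff_count is how many extra bytes the in-place expansion at the end of the function has to make room for (via skip_put_bytes()). The word-at-a-time expression marks 0xFF bytes: v & (v >> 4) has a low nibble of 0xF only where both nibbles of a byte are 0xF, the + 0x01010101 carries that into bit 4, and & 0x10101010 leaves one flag bit per byte, which the elided lines sum into ff_count. The same count, one byte at a time, as a sketch:

#include <stdint.h>

/* Sketch: scalar equivalent of the word-at-a-time 0xFF counter above. */
static int count_ff_bytes(const uint8_t *buf, int size)
{
    int count = 0;
    for (int i = 0; i < size; i++)
        count += (buf[i] == 0xFF);   /* each hit later needs a stuffed 0x00 */
    return count;
}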
/* from ff_mjpeg_encode_dc(): Huffman code for the size category, then the bits */
                        uint8_t *huff_size, uint16_t *huff_code)
    ...
        put_bits(pb, huff_size[0], huff_code[0]);
    ...
        put_bits(pb, huff_size[nbits], huff_code[nbits]);
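The table is indexed by the coefficient's category: 0 for a zero DC difference, otherwise the bit length of its magnitude. After the category's Huffman code, the magnitude bits follow verbatim, with a negative value sent as val - 1 so the leading bit carries the sign. A sketch of the whole scheme, assuming the put_sbits() helper listed below and av_log2()/FFABS() from libavutil:

#include "libavutil/common.h"  /* av_log2(), FFABS() */
#include "put_bits.h"          /* PutBitContext, put_bits(), put_sbits() */

/* Sketch: encode one DC difference against a Huffman table indexed by
 * category (magnitude bit length). */
static void encode_dc_diff(PutBitContext *pb, int val,
                           const uint8_t *huff_size,
                           const uint16_t *huff_code)
{
    if (val == 0) {
        put_bits(pb, huff_size[0], huff_code[0]);  /* category 0, no extra bits */
    } else {
        int mant  = val < 0 ? val - 1 : val;       /* one's-complement style sign */
        int nbits = av_log2(FFABS(val)) + 1;       /* category */
        put_bits(pb, huff_size[nbits], huff_code[nbits]);
        put_sbits(pb, nbits, mant);                /* low nbits of mant */
    }
}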
const struct AVCodec * codec
static void put_sbits(PutBitContext *pb, int n, int32_t value)
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
Write the bit val n times.
const uint8_t avpriv_mjpeg_bits_ac_luminance[17]
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown). That is the width of a pixel divided by the height of the pixel...
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
const uint8_t avpriv_mjpeg_bits_dc_chrominance[17]
const uint8_t avpriv_mjpeg_bits_ac_chrominance[17]
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
MJPEG encoder and decoder.
void ff_mjpeg_encode_stuffing(MpegEncContext *s)
#define av_assert0(cond)
assert() equivalent, that is always enabled.
static av_cold int end(AVCodecContext *avctx)
#define CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
int slice_context_count
number of used thread_contexts
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
int last_dc[3]
last DC values for MPEG1
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
Libavcodec external API header.
static int put_bits_count(PutBitContext *s)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static void skip_put_bytes(PutBitContext *s, int n)
Skip the given number of bytes.
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
int width
picture width / height.
const uint8_t avpriv_mjpeg_bits_dc_luminance[17]
const uint8_t avpriv_mjpeg_val_dc[12]
void ff_mjpeg_escape_FF(PutBitContext *pb, int start)
packed RGB 8:8:8, 24bpp, BGRBGR...
struct MpegEncContext * thread_context[MAX_THREADS]
void ff_mjpeg_encode_dc(PutBitContext *pb, int val, uint8_t *huff_size, uint16_t *huff_code)
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
main external API structure.
static void jpeg_table_header(AVCodecContext *avctx, PutBitContext *p, ScanTable *intra_scantable, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64], int hsample[3])
void ff_mjpeg_init_hvsample(AVCodecContext *avctx, int hsample[3], int vsample[3])
packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
const uint8_t avpriv_mjpeg_val_ac_luminance[]
struct AVCodecContext * avctx
PutBitContext pb
bit output
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
common internal and external API header
int prediction_method
prediction method (needed for huffyuv)
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_...
static void put_marker(PutBitContext *p, int code)
const uint8_t avpriv_mjpeg_val_ac_chrominance[]
static int put_huffman_table(PutBitContext *p, int table_class, int table_id, const uint8_t *bits_table, const uint8_t *value_table)
void avpriv_put_string(PutBitContext *pb, const char *string, int terminate_string)
Put the string string in the bitstream.
static void jpeg_put_comments(AVCodecContext *avctx, PutBitContext *p)