2 * huffyuv codec for libavcodec
4 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
6 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
9 * This file is part of Libav.
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
28 * huffyuv codec for libavcodec.
/* Spatial prediction mode signalled in the stream header (enumerator list elided in this view). */
typedef enum Predictor{
/* Per-instance codec state shared by the huffyuv encoder and decoder (fields elided in this view). */
typedef struct HYuvContext{
    AVCodecContext *avctx;
    int yuy2;                     //use yuy2 instead of 422P
    int bgr32;                    //use bgr32 instead of bgr24
    uint64_t stats[3][256];       // per-plane symbol frequencies, used to build Huffman length tables
    uint32_t bits[3][256];        // canonical Huffman code bits per plane (Y, U, V)
    uint32_t pix_bgr_map[1<<VLC_BITS]; // joint RGB decode map: short VLC code -> packed pixel
    VLC vlc[6]; //Y,U,V,YY,YU,YV
    uint8_t *bitstream_buffer;    // byte-swapped copy of the input packet (see decode_frame)
    unsigned int bitstream_buffer_size; // allocated size of bitstream_buffer
/* Run-length-encoded luma code-length table of the original ("classic") HuffYUV
 * v1 streams; parsed by read_len_table() in read_old_huffman_tables().
 * (Table truncated in this view.) */
static const unsigned char classic_shift_luma[] = {
    34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
    16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
/* Run-length-encoded chroma code-length table for classic (v1) HuffYUV streams;
 * same RLE format as classic_shift_luma, consumed by read_len_table(). */
static const unsigned char classic_shift_chroma[] = {
    66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
    56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
    214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
/* Fixed luma code-bits table for classic (v1) HuffYUV; copied into s->bits[0]
 * by read_old_huffman_tables(). One entry per 8-bit symbol. */
static const unsigned char classic_add_luma[256] = {
    3, 9, 5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
    73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
    68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
    35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
    37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
    35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
    27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
    15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
    12, 17, 19, 13, 4, 9, 2, 11, 1, 7, 8, 0, 16, 3, 14, 6,
    12, 10, 5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
    18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
    28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
    28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
    62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
    54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
    46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13, 7, 8,
/* Fixed chroma code-bits table for classic (v1) HuffYUV; copied into s->bits[1]
 * by read_old_huffman_tables(). One entry per 8-bit symbol. */
static const unsigned char classic_add_chroma[256] = {
    3, 1, 2, 2, 2, 2, 3, 3, 7, 5, 7, 5, 8, 6, 11, 9,
    7, 13, 11, 10, 9, 8, 7, 5, 9, 7, 6, 4, 7, 5, 8, 7,
    11, 8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
    43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
    143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
    80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
    17, 14, 5, 6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
    112,113,114,115, 4,117,118, 92, 94,121,122, 3,124,103, 2, 1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
    135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
    52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
    19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10, 9, 8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
    83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
    14, 16, 17, 18, 20, 21, 12, 14, 15, 9, 10, 6, 9, 6, 5, 8,
    6, 12, 8, 10, 7, 9, 6, 4, 6, 2, 2, 3, 3, 3, 3, 2,
/* Left-predict a row: dst[i] = src[i] - previous pixel, starting from 'left'.
 * Returns the last source pixel so the caller can chain rows.
 * The first 16 pixels are handled scalar; the remainder goes through the
 * DSP diff_bytes fast path. (Interior lines elided in this view.) */
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
            const int temp= src[i];
            const int temp= src[i];
        /* bulk of the row: byte-wise difference against the pixel one to the left */
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
/* Left-predict a BGR32 row component-wise; *red/*green/*blue carry the running
 * left values in and the row's last pixel out. The first 4 pixels are done
 * scalar, the rest via diff_bytes with a 4-byte (one pixel) offset.
 * (Interior lines elided in this view.) */
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
    /* src+12 == previous pixel (4 bytes per pixel, 4-pixel head start above) */
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    /* report the last pixel's components as the new running left values */
    *red= src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue= src[(w-1)*4+B];
/* Parse an RLE-coded table of 256 Huffman code lengths from the bitstream:
 * each record is a 3-bit repeat count (0 => extended 8-bit count follows)
 * and a 5-bit length value. Returns <0 on malformed input.
 * (Interior lines elided in this view.) */
static int read_len_table(uint8_t *dst, GetBitContext *gb){
        repeat= get_bits(gb, 3);
        val = get_bits(gb, 5);
            /* repeat==0 escapes to an 8-bit repeat count */
            repeat= get_bits(gb, 8);
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
/* Build canonical Huffman code bits from a 256-entry length table, assigning
 * codes from the longest length (32) down to the shortest. Returns <0 if the
 * lengths do not describe a valid prefix code. (Interior lines elided.) */
static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
        av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
215 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Restore the min-heap property (ordered by .val) by sifting the element at
 * 'root' down. Used by generate_len_table()'s Huffman construction.
 * (Some interior lines elided in this view.) */
static void heap_sift(HeapElem *h, int root, int size)
    while(root*2+1 < size) {
        int child = root*2+1;
        /* pick the smaller of the two children */
        if(child < size-1 && h[child].val > h[child+1].val)
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
/* Build a Huffman code-length table from symbol frequencies using a heap-based
 * package/merge-style construction. If any resulting length reaches 32 bits,
 * the outer loop retries with a larger 'offset' bias added to every frequency,
 * flattening the tree until all lengths fit. (Interior lines elided.) */
static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            /* bias keeps zero-frequency symbols representable and bounds depth */
            h[i].val = (stats[i] << 8) + offset;
        /* heapify: sift down every internal node */
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);
        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            heap_sift(h, 0, size);
        /* walk the merge tree top-down to derive each node's depth */
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            /* length overflow: retry with a bigger offset */
            if(dst[i] >= 32) break;
272 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Build the joint (pair-wise) VLC tables that let the decoder read two symbols
 * with a single table lookup: YU/YV pairs for planar YUV, and G/B/R triplets
 * packed into pix_bgr_map for RGB. (Some interior lines elided in this view.) */
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        /* planar YUV: one joint table per chroma plane (p=1: YU, p=2: YV) */
        for(i=y=0; y<256; y++){
            int len0 = s->len[0][y];
            int limit = VLC_BITS - len0;
            for(u=0; u<256; u++){
                int len1 = s->len[p][u];
                /* only pairs whose combined code fits in VLC_BITS are joined */
                len[i] = len0 + len1;
                bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                symbols[i] = (y<<8) + u;
                if(symbols[i] != 0xffff) // reserved to mean "invalid"
        free_vlc(&s->vlc[3+p]);
        init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        /* RGB path: precompute packed pixels for short G/B/R code triplets */
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
/* Parse the three per-plane Huffman length tables from 'src', derive canonical
 * code bits, (re)build the per-plane and joint VLCs, and return the number of
 * bytes consumed, or <0 on error. (Some interior lines elided in this view.) */
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
    init_get_bits(&gb, src, length*8);
        if(read_len_table(s->len[i], &gb)<0)
        if(generate_bits_table(s->bits[i], s->len[i])<0){
        /* rebuild in place: tables may be refreshed per frame in context mode */
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    generate_joint_tables(s);
    /* round the consumed bit count up to whole bytes */
    return (get_bits_count(&gb)+7)/8;
/* Install the fixed ("classic") HuffYUV tables used by old streams that carry
 * no table data: lengths come from the RLE-coded classic_shift_* arrays, code
 * bits from classic_add_*. For >=24 bpp (RGB) the luma table is reused for
 * plane 1. (Some interior lines elided in this view.) */
static int read_old_huffman_tables(HYuvContext *s){
    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];
    if(s->bitstream_bpp >= 24){
        /* RGB: plane 1 shares the luma tables */
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    /* plane 2 always mirrors plane 1 */
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    generate_joint_tables(s);
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
/* Allocate the per-row scratch buffers: three width-sized buffers for planar
 * YUV (<24 bpp), one 4*width buffer for packed RGB. The +16 slack covers the
 * DSP functions' over-read/over-write. (Some interior lines elided.) */
static av_cold void alloc_temp(HYuvContext *s){
    if(s->bitstream_bpp<24){
            s->temp[i]= av_malloc(s->width + 16);
        s->temp[0]= av_mallocz(4*s->width + 16);
/* Shared encoder/decoder setup: cache flags and dimensions from the
 * AVCodecContext and initialize the DSP function table.
 * (Some interior lines elided in this view.) */
static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;
    s->flags= avctx->flags;
    dsputil_init(&s->dsp, avctx);
    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);
427 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Decoder init: parse the codec configuration either from extradata (new-style
 * streams: method byte, bpp, interlace/context flags, then Huffman tables) or
 * from bits_per_coded_sample (classic streams, fixed tables), then pick the
 * output pixel format. (Some interior lines elided in this view.) */
static av_cold int decode_init(AVCodecContext *avctx)
    HYuvContext *s = avctx->priv_data;
    memset(s->vlc, 0, 3*sizeof(VLC));
    avctx->coded_frame= &s->picture;
    /* heuristic: frames taller than 288 lines default to interlaced */
    s->interlaced= s->height > 288;
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        int method, interlace;
        if (avctx->extradata_size < 4)
        /* byte 0: predictor in low 6 bits, decorrelate flag in bit 6 */
        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        /* byte 2: interlace override (1=on, 2=off, else keep heuristic) + context flag */
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;
        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
        /* classic stream: infer settings from bits_per_coded_sample */
        switch(avctx->bits_per_coded_sample&7){
        s->decorrelate= avctx->bits_per_coded_sample >= 24;
        s->predictor= MEDIAN;
        s->predictor= LEFT; //OLD
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
    if(read_old_huffman_tables(s) < 0)
    switch(s->bitstream_bpp){
        avctx->pix_fmt = PIX_FMT_YUV420P;
        avctx->pix_fmt = PIX_FMT_YUYV422;
        avctx->pix_fmt = PIX_FMT_YUV422P;
        avctx->pix_fmt = PIX_FMT_RGB32;
        avctx->pix_fmt = PIX_FMT_BGR24;
/* Frame-threading init: set up a per-thread copy of the decoder context and
 * rebuild the VLC tables from extradata (or the classic tables).
 * NOTE(review): decode_init passes extradata_size-4 to read_huffman_tables
 * after skipping the 4 header bytes, but this function passes extradata_size
 * unadjusted — looks like an off-by-4; confirm against upstream.
 * (Some interior lines elided in this view.) */
static av_cold int decode_init_thread_copy(AVCodecContext *avctx)
    HYuvContext *s = avctx->priv_data;
    avctx->coded_frame= &s->picture;
    /* the copied context must not share VLC table storage with the original */
    for (i = 0; i < 6; i++)
        s->vlc[i].table = NULL;
    if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size) < 0)
    if(read_old_huffman_tables(s) < 0)
548 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
550 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Serialize one 256-entry code-length table into 'buf' using the RLE format
 * that read_len_table() parses (3-bit repeat + 5-bit value, 0-repeat escape
 * to an 8-bit count). Returns the number of bytes written.
 * (Some interior lines elided in this view.) */
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
    for(; i<256 && len[i]==val && repeat<255; i++)
    assert(val < 32 && val >0 && repeat<256 && repeat>0);
        /* long run: escape with repeat field 0, then an 8-bit count */
        buf[index++]= repeat;
        buf[index++]= val | (repeat<<5);
/* Encoder init: choose bitstream bpp from the pixel format, validate option
 * combinations (huffyuv vs ffvhuff restrictions, context vs 2-pass, RGB vs
 * median), write the 4-byte extradata header plus the serialized Huffman
 * tables, and seed the symbol statistics — either from pass-1 stats_in or
 * from a generic distribution. (Some interior lines elided in this view.) */
static av_cold int encode_init(AVCodecContext *avctx)
    HYuvContext *s = avctx->priv_data;
    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    avctx->coded_frame= &s->picture;
    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        s->bitstream_bpp= 24;
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        /* restrictions of the original huffyuv bitstream (vs the ffvhuff variant) */
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
    /* 4-byte extradata header: method byte, bpp, flags, reserved */
    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;
        /* resume statistics from a previous pass */
        char *p= avctx->stats_in;
            for(j=0; j<256; j++){
                s->stats[i][j]+= strtol(p, &next, 0);
                if(next==p) return -1;
        if(p[0]==0 || p[1]==0 || p[2]==0) break;
        /* no stats: seed with a symmetric 1/(d+1) distribution around 0 */
        for(j=0; j<256; j++){
            int d= FFMIN(j, 256-j);
            s->stats[i][j]= 100000000/(d+1);
        generate_len_table(s->len[i], s->stats[i]);
        if(generate_bits_table(s->bits[i], s->len[i])<0){
        s->avctx->extradata_size+=
            store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
        /* context mode: re-seed stats scaled to the frame size */
        int pels = s->width*s->height / (i?40:10);
        for(j=0; j<256; j++){
            int d= FFMIN(j, 256-j);
            s->stats[i][j]= pels/(d+1);
700 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Read two symbols at once via the joint VLC table; on a miss (long codes)
 * fall back to two single-symbol reads.
 * TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
/* Decode 'count' 4:2:2 pixel pairs (Y0 U Y1 V) into s->temp[0..2].
 * Near the end of the buffer (worst case 31 bits per symbol, 4 symbols per
 * pair) the slow path re-checks the bit position each iteration to avoid
 * over-reading. (Some interior lines elided in this view.) */
static void decode_422_bitstream(HYuvContext *s, int count){
    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
/* Decode 'count' luma-only pixel pairs into s->temp[0]; same bounds-checked
 * slow path near the end of the buffer as decode_422_bitstream.
 * (Some interior lines elided in this view.) */
static void decode_gray_bitstream(HYuvContext *s, int count){
    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i ], s->temp[0][2*i+1], 0);
749 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Entropy-code 'count' 4:2:2 pixel pairs from s->temp (starting at 'offset'
 * luma samples), optionally accumulating stats for pass 1 and suppressing
 * output under CODEC_FLAG2_NO_OUTPUT. Fails if the packet buffer cannot hold
 * the worst case. (Some interior lines elided in this view.) */
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;
    /* worst case: 4 symbols/pair at up to 2 bytes each */
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
    if(s->flags&CODEC_FLAG_PASS1){
        /* stats-only pass */
        for(i=0; i<count; i++){
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
    /* context mode: write bits and update stats */
    for(i=0; i<count; i++){
        put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
        put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
        put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
        put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
    /* plain mode: write bits only */
    for(i=0; i<count; i++){
        put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
        put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
        put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
        put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
/* Entropy-code 'count' luma-only pixel pairs from s->temp[0]; same pass-1 /
 * no-output / context-mode structure as encode_422_bitstream.
 * (Some interior lines elided in this view.) */
static int encode_gray_bitstream(HYuvContext *s, int count){
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        int y0 = s->temp[0][2*i];\
        int y1 = s->temp[0][2*i+1];
        put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
        put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        for(i=0; i<count; i++){
        for(i=0; i<count; i++){
845 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
/* Decode 'count' RGB pixels into s->temp[0] (4 bytes each). The joint table
 * covers common short-code triplets in one lookup; otherwise B, G, R are read
 * individually, with G added back when 'decorrelate' is set. 'alpha' selects
 * whether an A symbol follows each pixel. Compile-time-specialized via
 * av_always_inline on the constant flags. (Some interior lines elided.) */
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
            /* joint-table hit: whole pixel precomputed */
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
/* Dispatch to the four specialized decode_bgr_1 instantiations based on the
 * decorrelate flag and whether the stream carries alpha (bpp != 24).
 * (Some interior lines elided in this view.) */
static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->bitstream_bpp==24)
        decode_bgr_1(s, count, 1, 0);
        decode_bgr_1(s, count, 1, 1);
    if(s->bitstream_bpp==24)
        decode_bgr_1(s, count, 0, 0);
        decode_bgr_1(s, count, 0, 1);
/* Entropy-code 'count' RGB pixels from s->temp[0], subtracting G from B and R
 * (decorrelation) before coding; same pass-1 / no-output / context structure
 * as the YUV encoders. (Some interior lines elided in this view.) */
static int encode_bgr_bitstream(HYuvContext *s, int count){
    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        int g= s->temp[0][4*i+G];\
        int b= (s->temp[0][4*i+B] - g) & 0xff;\
        int r= (s->temp[0][4*i+R] - g) & 0xff;
        put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
        put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
        put_bits(&s->pb, s->len[2][r], s->bits[2][r]);
    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
        for(i=0; i<count; i++){
922 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
/* Hand the rows decoded since the last call (up to line 'y') to the user's
 * draw_horiz_band callback, if one is installed; no-op otherwise.
 * (Some interior lines elided in this view.) */
static void draw_slice(HYuvContext *s, int y){
    if(s->avctx->draw_horiz_band==NULL)
    h= y - s->last_slice_end;
    if(s->bitstream_bpp==12){
    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);
    s->last_slice_end= y + h;
/* Decode one packet into an AVFrame. The packet is byte-swapped into
 * bitstream_buffer (huffyuv stores the stream in 32-bit little-endian words),
 * optional per-frame Huffman tables are parsed, then rows are decoded
 * according to the predictor (LEFT / PLANE / MEDIAN) and bpp, and delivered
 * incrementally via draw_slice(). Returns bytes consumed or <0 on error.
 * (Many interior lines elided in this view.) */
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    AVFrame *picture = data;
    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);
    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    /* undo the 32-bit word byte-swap applied by the encoder */
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
    ff_thread_release_buffer(avctx, p);
    if(ff_thread_get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        /* context mode: tables are refreshed from the packet itself */
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
    /* guard against overflow in the *8 below */
    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);
    /* interlaced content predicts from two rows up (same field) */
    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];
    s->last_slice_end= 0;
    if(s->bitstream_bpp<24){
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;
            /* yuy2 path: first 4 bytes stored raw */
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);
            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            /* planar path: first V, Y1, U, Y0 stored raw and used as seeds */
            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);
        switch(s->predictor){
            /* LEFT/PLANE: first row (minus the 2 raw pixels) is left-predicted */
            decode_422_bitstream(s, width-2);
            lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
            if(!(s->flags&CODEC_FLAG_GRAY)){
                leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
            for(cy=y=1; y<s->height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;
                if(s->bitstream_bpp==12){
                    /* 4:2:0: every other line is luma-only */
                    decode_gray_bitstream(s, width);
                    ydst= p->data[0] + p->linesize[0]*y;
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(s->predictor == PLANE){
                        s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                    if(y>=s->height) break;
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;
                decode_422_bitstream(s, width);
                lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                if(s->predictor == PLANE){
                    if(cy>s->interlaced){
                        s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        if(!(s->flags&CODEC_FLAG_GRAY)){
                            s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                            s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
            draw_slice(s, height);
            /* first line except first 2 pixels is left predicted */
            decode_422_bitstream(s, width-2);
            lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
            if(!(s->flags&CODEC_FLAG_GRAY)){
                leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
            /* second line is left predicted for interlaced case */
            decode_422_bitstream(s, width);
            lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
            if(!(s->flags&CODEC_FLAG_GRAY)){
                /* NOTE(review): linesize[2] is used with data[1] and linesize[1]
                 * with data[2] — cross-swapped indices; confirm against upstream. */
                leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
            /* next 4 pixels are left predicted too */
            decode_422_bitstream(s, 4);
            lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
            if(!(s->flags&CODEC_FLAG_GRAY)){
                leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
            /* next line except the first 4 pixels is median predicted */
            lefttopy= p->data[0][3];
            decode_422_bitstream(s, width-4);
            s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
            if(!(s->flags&CODEC_FLAG_GRAY)){
                lefttopu= p->data[1][1];
                lefttopv= p->data[2][1];
                s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;
                if(s->bitstream_bpp==12){
                    decode_gray_bitstream(s, width);
                    ydst= p->data[0] + p->linesize[0]*y;
                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(y>=height) break;
                decode_422_bitstream(s, width);
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;
                s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
            draw_slice(s, height);
        /* RGB path */
        int leftr, leftg, leftb, lefta;
        const int last_line= (height-1)*p->linesize[0];
        if(s->bitstream_bpp==32){
            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            lefta= p->data[0][last_line+A]= 255;
            skip_bits(&s->gb, 8);
        switch(s->predictor){
            decode_bgr_bitstream(s, width-1);
            s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);
            for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                decode_bgr_bitstream(s, width);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                if(s->predictor == PLANE){
                    if(s->bitstream_bpp!=32) lefta=0;
                    if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                        s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                         p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
            draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
            av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
        av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
    *data_size = sizeof(AVFrame);
    /* consumed bytes, rounded up to whole 32-bit words, plus the table bytes */
    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
1196 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
/* Shared teardown: free the per-row scratch buffers allocated by alloc_temp().
 * (Some interior lines elided in this view.) */
static int common_end(HYuvContext *s){
        av_freep(&s->temp[i]);
1207 #if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
1208 static av_cold int decode_end(AVCodecContext *avctx)
1210 HYuvContext *s = avctx->priv_data;
1213 if (s->picture.data[0])
1214 avctx->release_buffer(avctx, &s->picture);
1217 av_freep(&s->bitstream_buffer);
1220 free_vlc(&s->vlc[i]);
1225 #endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */
1227 #if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
/* Encode one frame into 'buf'. In context mode the per-frame Huffman tables
 * are regenerated from the running stats and stored first; rows are then
 * predicted (LEFT / PLANE / MEDIAN) and entropy-coded, pass-1 stats are
 * emitted every 32 frames, and the output is byte-swapped into 32-bit
 * little-endian words. Returns the packet size in bytes.
 * (Many interior lines elided in this view.) */
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    p->pict_type= AV_PICTURE_TYPE_I;
        /* context mode: rebuild tables from current stats and decay them */
        generate_len_table(s->len[i], s->stats[i]);
        if(generate_bits_table(s->bits[i], s->len[i])<0)
        size+= store_table(s, s->len[i], &buf[size]);
        for(j=0; j<256; j++)
            s->stats[i][j] >>= 1;
    init_put_bits(&s->pb, buf+size, buf_size-size);
    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;
        /* first V, Y1, U, Y0 are stored raw as prediction seeds */
        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8, p->data[0][0]);
        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);
        encode_422_bitstream(s, 2, width-2);
        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            /* second line (interlaced: second field's first) is left-predicted */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);
            encode_422_bitstream(s, 0, width);
            /* next 4 pixels are left-predicted too */
            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);
            encode_422_bitstream(s, 0, 4);
            /* remainder of that line and all following lines are median-predicted */
            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;
                    s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                    encode_gray_bitstream(s, width);
                    if(y>=height) break;
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;
                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);
                encode_422_bitstream(s, 0, width);
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;
                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;
                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    encode_gray_bitstream(s, width);
                    if(y>=height) break;
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;
                if(s->predictor == PLANE && s->interlaced < cy){
                    /* PLANE: vertical diff first, then left-predict the residual */
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);
                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                encode_422_bitstream(s, 0, width);
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        /* RGB is stored bottom-up: walk rows with a negative stride */
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int leftr, leftg, leftb;
        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);
        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);
        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            encode_bgr_bitstream(s, width);
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    size+= (put_bits_count(&s->pb)+31)/8;
    /* flush padding so the trailing partial word is well defined */
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        /* dump accumulated stats for the second pass, every 32 frames */
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
            snprintf(p, end-p, "\n");
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        /* swap to 32-bit little-endian words, as the decoder expects */
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    s->picture_number++;
/* Encoder teardown: free scratch buffers plus the extradata and stats strings
 * allocated in encode_init(). (Some interior lines elided in this view.) */
static av_cold int encode_end(AVCodecContext *avctx)
    HYuvContext *s = avctx->priv_data;
    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);
1434 #endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */
1436 #if CONFIG_HUFFYUV_DECODER
/* Registration of the huffyuv decoder (frame-threaded, DR1-capable). */
AVCodec ff_huffyuv_decoder = {
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_HUFFYUV,
    .priv_data_size = sizeof(HYuvContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .long_name      = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1451 #if CONFIG_FFVHUFF_DECODER
1452 AVCodec ff_ffvhuff_decoder = {
1454 .type = AVMEDIA_TYPE_VIDEO,
1455 .id = CODEC_ID_FFVHUFF,
1456 .priv_data_size = sizeof(HYuvContext),
1457 .init = decode_init,
1458 .close = decode_end,
1459 .decode = decode_frame,
1460 .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_FRAME_THREADS,
1461 .init_thread_copy = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
1462 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
1466 #if CONFIG_HUFFYUV_ENCODER
1467 AVCodec ff_huffyuv_encoder = {
1469 .type = AVMEDIA_TYPE_VIDEO,
1470 .id = CODEC_ID_HUFFYUV,
1471 .priv_data_size = sizeof(HYuvContext),
1472 .init = encode_init,
1473 .encode = encode_frame,
1474 .close = encode_end,
1475 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1476 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
1480 #if CONFIG_FFVHUFF_ENCODER
1481 AVCodec ff_ffvhuff_encoder = {
1483 .type = AVMEDIA_TYPE_VIDEO,
1484 .id = CODEC_ID_FFVHUFF,
1485 .priv_data_size = sizeof(HYuvContext),
1486 .init = encode_init,
1487 .encode = encode_frame,
1488 .close = encode_end,
1489 .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
1490 .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),