ffmpeg / libavcodec / huffyuv.c @ d36beb3f
/*
 * huffyuv codec for libavcodec
 *
 * Copyright (c) 2002-2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * see http://www.pcisys.net/~melanson/codecs/huffyuv.txt for a description of
 * the algorithm used
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * huffyuv codec for libavcodec.
 */

#include "avcodec.h" |
32 |
#include "get_bits.h" |
33 |
#include "put_bits.h" |
34 |
#include "dsputil.h" |
35 |
|
36 |
#define VLC_BITS 11 |
37 |
|
38 |
#if HAVE_BIGENDIAN
|
39 |
#define B 3 |
40 |
#define G 2 |
41 |
#define R 1 |
42 |
#define A 0 |
43 |
#else
|
44 |
#define B 0 |
45 |
#define G 1 |
46 |
#define R 2 |
47 |
#define A 3 |
48 |
#endif
|
49 |
|
50 |
typedef enum Predictor{
    LEFT= 0,
    PLANE,
    MEDIAN,
} Predictor;

typedef struct HYuvContext{
    AVCodecContext *avctx;
    Predictor predictor;
    GetBitContext gb;
    PutBitContext pb;
    int interlaced;
    int decorrelate;
    int bitstream_bpp;
    int version;
    int yuy2;                               //use yuy2 instead of 422P
    int bgr32;                              //use bgr32 instead of bgr24
    int width, height;
    int flags;
    int context;
    int picture_number;
    int last_slice_end;
    uint8_t *temp[3];
    uint64_t stats[3][256];
    uint8_t len[3][256];
    uint32_t bits[3][256];
    uint32_t pix_bgr_map[1<<VLC_BITS];
    VLC vlc[6];                             //Y,U,V,YY,YU,YV
    AVFrame picture;
    uint8_t *bitstream_buffer;
    unsigned int bitstream_buffer_size;
    DSPContext dsp;
}HYuvContext;

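/* Hard-coded tables for "classic" huffyuv v1 streams that carry no Huffman
 * tables in extradata; read_old_huffman_tables() below parses the *_shift
 * tables as run-length coded code lengths and uses the *_add tables as the
 * code bits. */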
static const unsigned char classic_shift_luma[] = {
  34,36,35,69,135,232,9,16,10,24,11,23,12,16,13,10,14,8,15,8,
  16,8,17,20,16,10,207,206,205,236,11,8,10,21,9,23,8,8,199,70,
  69,68, 0
};

static const unsigned char classic_shift_chroma[] = {
  66,36,37,38,39,40,41,75,76,77,110,239,144,81,82,83,84,85,118,183,
  56,57,88,89,56,89,154,57,58,57,26,141,57,56,58,57,58,57,184,119,
  214,245,116,83,82,49,80,79,78,77,44,75,41,40,39,38,37,36,34, 0
};

static const unsigned char classic_add_luma[256] = {
    3,  9,  5, 12, 10, 35, 32, 29, 27, 50, 48, 45, 44, 41, 39, 37,
   73, 70, 68, 65, 64, 61, 58, 56, 53, 50, 49, 46, 44, 41, 38, 36,
   68, 65, 63, 61, 58, 55, 53, 51, 48, 46, 45, 43, 41, 39, 38, 36,
   35, 33, 32, 30, 29, 27, 26, 25, 48, 47, 46, 44, 43, 41, 40, 39,
   37, 36, 35, 34, 32, 31, 30, 28, 27, 26, 24, 23, 22, 20, 19, 37,
   35, 34, 33, 31, 30, 29, 27, 26, 24, 23, 21, 20, 18, 17, 15, 29,
   27, 26, 24, 22, 21, 19, 17, 16, 14, 26, 25, 23, 21, 19, 18, 16,
   15, 27, 25, 23, 21, 19, 17, 16, 14, 26, 25, 23, 21, 18, 17, 14,
   12, 17, 19, 13,  4,  9,  2, 11,  1,  7,  8,  0, 16,  3, 14,  6,
   12, 10,  5, 15, 18, 11, 10, 13, 15, 16, 19, 20, 22, 24, 27, 15,
   18, 20, 22, 24, 26, 14, 17, 20, 22, 24, 27, 15, 18, 20, 23, 25,
   28, 16, 19, 22, 25, 28, 32, 36, 21, 25, 29, 33, 38, 42, 45, 49,
   28, 31, 34, 37, 40, 42, 44, 47, 49, 50, 52, 54, 56, 57, 59, 60,
   62, 64, 66, 67, 69, 35, 37, 39, 40, 42, 43, 45, 47, 48, 51, 52,
   54, 55, 57, 59, 60, 62, 63, 66, 67, 69, 71, 72, 38, 40, 42, 43,
   46, 47, 49, 51, 26, 28, 30, 31, 33, 34, 18, 19, 11, 13,  7,  8,
};

static const unsigned char classic_add_chroma[256] = {
    3,  1,  2,  2,  2,  2,  3,  3,  7,  5,  7,  5,  8,  6, 11,  9,
    7, 13, 11, 10,  9,  8,  7,  5,  9,  7,  6,  4,  7,  5,  8,  7,
   11,  8, 13, 11, 19, 15, 22, 23, 20, 33, 32, 28, 27, 29, 51, 77,
   43, 45, 76, 81, 46, 82, 75, 55, 56,144, 58, 80, 60, 74,147, 63,
  143, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
   80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 27, 30, 21, 22,
   17, 14,  5,  6,100, 54, 47, 50, 51, 53,106,107,108,109,110,111,
  112,113,114,115,  4,117,118, 92, 94,121,122,  3,124,103,  2,  1,
    0,129,130,131,120,119,126,125,136,137,138,139,140,141,142,134,
  135,132,133,104, 64,101, 62, 57,102, 95, 93, 59, 61, 28, 97, 96,
   52, 49, 48, 29, 32, 25, 24, 46, 23, 98, 45, 44, 43, 20, 42, 41,
   19, 18, 99, 40, 15, 39, 38, 16, 13, 12, 11, 37, 10,  9,  8, 36,
    7,128,127,105,123,116, 35, 34, 33,145, 31, 79, 42,146, 78, 26,
   83, 48, 49, 50, 44, 47, 26, 31, 30, 18, 17, 19, 21, 24, 25, 13,
   14, 16, 17, 18, 20, 21, 12, 14, 15,  9, 10,  6,  9,  6,  5,  8,
    6, 12,  8, 10,  7,  9,  6,  4,  6,  2,  2,  3,  3,  3,  3,  2,
};

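/* Left prediction of a line: the first few samples are differenced in plain C,
 * the rest is handed to dsp.diff_bytes() (presumably so the SIMD routine can
 * start at a fixed offset); the value returned is the last source sample, to
 * be carried into the next line as the new "left" neighbour. */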
static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int left){
    int i;
    if(w<32){
        for(i=0; i<w; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        return left;
    }else{
        for(i=0; i<16; i++){
            const int temp= src[i];
            dst[i]= temp - left;
            left= temp;
        }
        s->dsp.diff_bytes(dst+16, src+16, src+15, w-16);
        return src[w-1];
    }
}

static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst, uint8_t *src, int w, int *red, int *green, int *blue){
    int i;
    int r,g,b;
    r= *red;
    g= *green;
    b= *blue;
    for(i=0; i<FFMIN(w,4); i++){
        const int rt= src[i*4+R];
        const int gt= src[i*4+G];
        const int bt= src[i*4+B];
        dst[i*4+R]= rt - r;
        dst[i*4+G]= gt - g;
        dst[i*4+B]= bt - b;
        r = rt;
        g = gt;
        b = bt;
    }
    s->dsp.diff_bytes(dst+16, src+16, src+12, w*4-16);
    *red=   src[(w-1)*4+R];
    *green= src[(w-1)*4+G];
    *blue=  src[(w-1)*4+B];
}

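/* Parse a run-length coded table of code lengths: each group is a 3-bit repeat
 * count followed by a 5-bit length value; a repeat of 0 means the real repeat
 * count follows as 8 bits. Exactly 256 entries must be filled. */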
static int read_len_table(uint8_t *dst, GetBitContext *gb){
    int i, val, repeat;

    for(i=0; i<256;){
        repeat= get_bits(gb, 3);
        val   = get_bits(gb, 5);
        if(repeat==0)
            repeat= get_bits(gb, 8);
        //printf("%d %d\n", val, repeat);
        if(i+repeat > 256) {
            av_log(NULL, AV_LOG_ERROR, "Error reading huffman table\n");
            return -1;
        }
        while (repeat--)
            dst[i++] = val;
    }
    return 0;
}

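/* Derive canonical Huffman code bits from the code lengths, assigning codes
 * from the longest length upwards; an odd code count at any length means the
 * lengths cannot describe a valid prefix code. */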
static int generate_bits_table(uint32_t *dst, const uint8_t *len_table){
    int len, index;
    uint32_t bits=0;

    for(len=32; len>0; len--){
        for(index=0; index<256; index++){
            if(len_table[index]==len)
                dst[index]= bits++;
        }
        if(bits & 1){
            av_log(NULL, AV_LOG_ERROR, "Error generating huffman table\n");
            return -1;
        }
        bits >>= 1;
    }
    return 0;
}

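/* Encoder-only helpers: generate_len_table() builds Huffman code lengths from
 * symbol frequencies with a small binary min-heap, repeatedly merging the two
 * least frequent nodes; if any resulting length reaches 32 bits it retries
 * with a larger `offset` added to the counts to flatten the tree. */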
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
typedef struct {
    uint64_t val;
    int name;
} HeapElem;

static void heap_sift(HeapElem *h, int root, int size)
{
    while(root*2+1 < size) {
        int child = root*2+1;
        if(child < size-1 && h[child].val > h[child+1].val)
            child++;
        if(h[root].val > h[child].val) {
            FFSWAP(HeapElem, h[root], h[child]);
            root = child;
        } else
            break;
    }
}

static void generate_len_table(uint8_t *dst, const uint64_t *stats){
    HeapElem h[256];
    int up[2*256];
    int len[2*256];
    int offset, i, next;
    int size = 256;

    for(offset=1; ; offset<<=1){
        for(i=0; i<size; i++){
            h[i].name = i;
            h[i].val = (stats[i] << 8) + offset;
        }
        for(i=size/2-1; i>=0; i--)
            heap_sift(h, i, size);

        for(next=size; next<size*2-1; next++){
            // merge the two smallest entries, and put it back in the heap
            uint64_t min1v = h[0].val;
            up[h[0].name] = next;
            h[0].val = INT64_MAX;
            heap_sift(h, 0, size);
            up[h[0].name] = next;
            h[0].name = next;
            h[0].val += min1v;
            heap_sift(h, 0, size);
        }

        len[2*size-2] = 0;
        for(i=2*size-3; i>=size; i--)
            len[i] = len[up[i]] + 1;
        for(i=0; i<size; i++) {
            dst[i] = len[up[i]] + 1;
            if(dst[i] >= 32) break;
        }
        if(i==size) break;
    }
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

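/* Build joint VLC tables so that two symbols (a luma pair or a luma/chroma
 * pair), or a whole decorrelated RGB triple, can be decoded with a single
 * get_vlc2() call whenever the combined code length fits in VLC_BITS; the
 * symbol value 0xffff is kept free as an "escape to single reads" marker. */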
static void generate_joint_tables(HYuvContext *s){
    uint16_t symbols[1<<VLC_BITS];
    uint16_t bits[1<<VLC_BITS];
    uint8_t len[1<<VLC_BITS];
    if(s->bitstream_bpp < 24){
        int p, i, y, u;
        for(p=0; p<3; p++){
            for(i=y=0; y<256; y++){
                int len0 = s->len[0][y];
                int limit = VLC_BITS - len0;
                if(limit <= 0)
                    continue;
                for(u=0; u<256; u++){
                    int len1 = s->len[p][u];
                    if(len1 > limit)
                        continue;
                    len[i] = len0 + len1;
                    bits[i] = (s->bits[0][y] << len1) + s->bits[p][u];
                    symbols[i] = (y<<8) + u;
                    if(symbols[i] != 0xffff) // reserved to mean "invalid"
                        i++;
                }
            }
            free_vlc(&s->vlc[3+p]);
            init_vlc_sparse(&s->vlc[3+p], VLC_BITS, i, len, 1, 1, bits, 2, 2, symbols, 2, 2, 0);
        }
    }else{
        uint8_t (*map)[4] = (uint8_t(*)[4])s->pix_bgr_map;
        int i, b, g, r, code;
        int p0 = s->decorrelate;
        int p1 = !s->decorrelate;
        // restrict the range to +/-16 because that's pretty much guaranteed to
        // cover all the combinations that fit in 11 bits total, and it doesn't
        // matter if we miss a few rare codes.
        for(i=0, g=-16; g<16; g++){
            int len0 = s->len[p0][g&255];
            int limit0 = VLC_BITS - len0;
            if(limit0 < 2)
                continue;
            for(b=-16; b<16; b++){
                int len1 = s->len[p1][b&255];
                int limit1 = limit0 - len1;
                if(limit1 < 1)
                    continue;
                code = (s->bits[p0][g&255] << len1) + s->bits[p1][b&255];
                for(r=-16; r<16; r++){
                    int len2 = s->len[2][r&255];
                    if(len2 > limit1)
                        continue;
                    len[i] = len0 + len1 + len2;
                    bits[i] = (code << len2) + s->bits[2][r&255];
                    if(s->decorrelate){
                        map[i][G] = g;
                        map[i][B] = g+b;
                        map[i][R] = g+r;
                    }else{
                        map[i][B] = g;
                        map[i][G] = b;
                        map[i][R] = r;
                    }
                    i++;
                }
            }
        }
        free_vlc(&s->vlc[3]);
        init_vlc(&s->vlc[3], VLC_BITS, i, len, 1, 1, bits, 2, 2, 0);
    }
}

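/* Read the three transmitted length tables (Y, U, V), derive the code bits,
 * rebuild the per-plane VLCs and the joint tables, and return the number of
 * bytes consumed so the caller can skip past the table data. */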
static int read_huffman_tables(HYuvContext *s, const uint8_t *src, int length){
    GetBitContext gb;
    int i;

    init_get_bits(&gb, src, length*8);

    for(i=0; i<3; i++){
        if(read_len_table(s->len[i], &gb)<0)
            return -1;
        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }
#if 0
for(j=0; j<256; j++){
printf("%6X, %2d, %3d\n", s->bits[i][j], s->len[i][j], j);
}
#endif
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return (get_bits_count(&gb)+7)/8;
}

static int read_old_huffman_tables(HYuvContext *s){
#if 1
    GetBitContext gb;
    int i;

    init_get_bits(&gb, classic_shift_luma, sizeof(classic_shift_luma)*8);
    if(read_len_table(s->len[0], &gb)<0)
        return -1;
    init_get_bits(&gb, classic_shift_chroma, sizeof(classic_shift_chroma)*8);
    if(read_len_table(s->len[1], &gb)<0)
        return -1;

    for(i=0; i<256; i++) s->bits[0][i] = classic_add_luma  [i];
    for(i=0; i<256; i++) s->bits[1][i] = classic_add_chroma[i];

    if(s->bitstream_bpp >= 24){
        memcpy(s->bits[1], s->bits[0], 256*sizeof(uint32_t));
        memcpy(s->len[1] , s->len [0], 256*sizeof(uint8_t));
    }
    memcpy(s->bits[2], s->bits[1], 256*sizeof(uint32_t));
    memcpy(s->len[2] , s->len [1], 256*sizeof(uint8_t));

    for(i=0; i<3; i++){
        free_vlc(&s->vlc[i]);
        init_vlc(&s->vlc[i], VLC_BITS, 256, s->len[i], 1, 1, s->bits[i], 4, 4, 0);
    }

    generate_joint_tables(s);

    return 0;
#else
    av_log(s->avctx, AV_LOG_DEBUG, "v1 huffyuv is not supported \n");
    return -1;
#endif
}

static av_cold void alloc_temp(HYuvContext *s){
    int i;

    if(s->bitstream_bpp<24){
        for(i=0; i<3; i++){
            s->temp[i]= av_malloc(s->width + 16);
        }
    }else{
        s->temp[0]= av_mallocz(4*s->width + 16);
    }
}

static av_cold int common_init(AVCodecContext *avctx){
    HYuvContext *s = avctx->priv_data;

    s->avctx= avctx;
    s->flags= avctx->flags;

    dsputil_init(&s->dsp, avctx);

    s->width= avctx->width;
    s->height= avctx->height;
    assert(s->width>0 && s->height>0);

    return 0;
}

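/* Decoder setup. Version 2 streams describe themselves in extradata:
 * byte 0 = predictor (low 6 bits) and decorrelate flag (bit 6),
 * byte 1 = bitstream bpp, byte 2 = interlace hint (bits 4-5) and
 * per-frame-context flag (bit 6), followed by the three length tables.
 * Streams without extradata fall back to the classic v1 tables. */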
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_init(avctx);
    memset(s->vlc, 0, 3*sizeof(VLC));

    avctx->coded_frame= &s->picture;
    s->interlaced= s->height > 288;

    s->bgr32=1;
//if(avctx->extradata)
//  printf("extradata:%X, extradata_size:%d\n", *(uint32_t*)avctx->extradata, avctx->extradata_size);
    if(avctx->extradata_size){
        if((avctx->bits_per_coded_sample&7) && avctx->bits_per_coded_sample != 12)
            s->version=1; // do such files exist at all?
        else
            s->version=2;
    }else
        s->version=0;

    if(s->version==2){
        int method, interlace;

        if (avctx->extradata_size < 4)
            return -1;

        method= ((uint8_t*)avctx->extradata)[0];
        s->decorrelate= method&64 ? 1 : 0;
        s->predictor= method&63;
        s->bitstream_bpp= ((uint8_t*)avctx->extradata)[1];
        if(s->bitstream_bpp==0)
            s->bitstream_bpp= avctx->bits_per_coded_sample&~7;
        interlace= (((uint8_t*)avctx->extradata)[2] & 0x30) >> 4;
        s->interlaced= (interlace==1) ? 1 : (interlace==2) ? 0 : s->interlaced;
        s->context= ((uint8_t*)avctx->extradata)[2] & 0x40 ? 1 : 0;

        if(read_huffman_tables(s, ((uint8_t*)avctx->extradata)+4, avctx->extradata_size-4) < 0)
            return -1;
    }else{
        switch(avctx->bits_per_coded_sample&7){
        case 1:
            s->predictor= LEFT;
            s->decorrelate= 0;
            break;
        case 2:
            s->predictor= LEFT;
            s->decorrelate= 1;
            break;
        case 3:
            s->predictor= PLANE;
            s->decorrelate= avctx->bits_per_coded_sample >= 24;
            break;
        case 4:
            s->predictor= MEDIAN;
            s->decorrelate= 0;
            break;
        default:
            s->predictor= LEFT; //OLD
            s->decorrelate= 0;
            break;
        }
        s->bitstream_bpp= avctx->bits_per_coded_sample & ~7;
        s->context= 0;

        if(read_old_huffman_tables(s) < 0)
            return -1;
    }

    switch(s->bitstream_bpp){
    case 12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        break;
    case 16:
        if(s->yuy2){
            avctx->pix_fmt = PIX_FMT_YUYV422;
        }else{
            avctx->pix_fmt = PIX_FMT_YUV422P;
        }
        break;
    case 24:
    case 32:
        if(s->bgr32){
            avctx->pix_fmt = PIX_FMT_RGB32;
        }else{
            avctx->pix_fmt = PIX_FMT_BGR24;
        }
        break;
    default:
        assert(0);
    }

    alloc_temp(s);

//    av_log(NULL, AV_LOG_DEBUG, "pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

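/* store_table() writes a length table in the same run-length format that
 * read_len_table() parses: runs of up to 7 packed as val|(repeat<<5), longer
 * runs as a val byte (repeat field 0) followed by an 8-bit repeat count. */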
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf){
    int i;
    int index= 0;

    for(i=0; i<256;){
        int val= len[i];
        int repeat=0;

        for(; i<256 && len[i]==val && repeat<255; i++)
            repeat++;

        assert(val < 32 && val >0 && repeat<256 && repeat>0);
        if(repeat>7){
            buf[index++]= val;
            buf[index++]= repeat;
        }else{
            buf[index++]= val | (repeat<<5);
        }
    }

    return index;
}

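/* Encoder setup: picks the bitstream bpp from the pixel format, writes the
 * 4-byte extradata header plus the three stored length tables, and seeds the
 * symbol statistics either from two-pass stats_in or from a simple prior that
 * favours small residuals (roughly 1/(|value|+1)). */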
static av_cold int encode_init(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i, j;

    common_init(avctx);

    avctx->extradata= av_mallocz(1024*30); // 256*3+4 == 772
    avctx->stats_out= av_mallocz(1024*30); // 21*256*3(%llu ) + 3(\n) + 1(0) = 16132
    s->version=2;

    avctx->coded_frame= &s->picture;

    switch(avctx->pix_fmt){
    case PIX_FMT_YUV420P:
        s->bitstream_bpp= 12;
        break;
    case PIX_FMT_YUV422P:
        s->bitstream_bpp= 16;
        break;
    case PIX_FMT_RGB32:
        s->bitstream_bpp= 24;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "format not supported\n");
        return -1;
    }
    avctx->bits_per_coded_sample= s->bitstream_bpp;
    s->decorrelate= s->bitstream_bpp >= 24;
    s->predictor= avctx->prediction_method;
    s->interlaced= avctx->flags&CODEC_FLAG_INTERLACED_ME ? 1 : 0;
    if(avctx->context_model==1){
        s->context= avctx->context_model;
        if(s->flags & (CODEC_FLAG_PASS1|CODEC_FLAG_PASS2)){
            av_log(avctx, AV_LOG_ERROR, "context=1 is not compatible with 2 pass huffyuv encoding\n");
            return -1;
        }
    }else s->context= 0;

    if(avctx->codec->id==CODEC_ID_HUFFYUV){
        if(avctx->pix_fmt==PIX_FMT_YUV420P){
            av_log(avctx, AV_LOG_ERROR, "Error: YV12 is not supported by huffyuv; use vcodec=ffvhuff or format=422p\n");
            return -1;
        }
        if(avctx->context_model){
            av_log(avctx, AV_LOG_ERROR, "Error: per-frame huffman tables are not supported by huffyuv; use vcodec=ffvhuff\n");
            return -1;
        }
        if(s->interlaced != ( s->height > 288 ))
            av_log(avctx, AV_LOG_INFO, "using huffyuv 2.2.0 or newer interlacing flag\n");
    }

    if(s->bitstream_bpp>=24 && s->predictor==MEDIAN){
        av_log(avctx, AV_LOG_ERROR, "Error: RGB is incompatible with median predictor\n");
        return -1;
    }

    ((uint8_t*)avctx->extradata)[0]= s->predictor | (s->decorrelate << 6);
    ((uint8_t*)avctx->extradata)[1]= s->bitstream_bpp;
    ((uint8_t*)avctx->extradata)[2]= s->interlaced ? 0x10 : 0x20;
    if(s->context)
        ((uint8_t*)avctx->extradata)[2]|= 0x40;
    ((uint8_t*)avctx->extradata)[3]= 0;
    s->avctx->extradata_size= 4;

    if(avctx->stats_in){
        char *p= avctx->stats_in;

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 1;

        for(;;){
            for(i=0; i<3; i++){
                char *next;

                for(j=0; j<256; j++){
                    s->stats[i][j]+= strtol(p, &next, 0);
                    if(next==p) return -1;
                    p=next;
                }
            }
            if(p[0]==0 || p[1]==0 || p[2]==0) break;
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);

                s->stats[i][j]= 100000000/(d+1);
            }
    }

    for(i=0; i<3; i++){
        generate_len_table(s->len[i], s->stats[i]);

        if(generate_bits_table(s->bits[i], s->len[i])<0){
            return -1;
        }

        s->avctx->extradata_size+=
        store_table(s, s->len[i], &((uint8_t*)s->avctx->extradata)[s->avctx->extradata_size]);
    }

    if(s->context){
        for(i=0; i<3; i++){
            int pels = s->width*s->height / (i?40:10);
            for(j=0; j<256; j++){
                int d= FFMIN(j, 256-j);
                s->stats[i][j]= pels/(d+1);
            }
        }
    }else{
        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j]= 0;
    }

//    printf("pred:%d bpp:%d hbpp:%d il:%d\n", s->predictor, s->bitstream_bpp, avctx->bits_per_coded_sample, s->interlaced);

    alloc_temp(s);

    s->picture_number=0;

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

/* TODO instead of restarting the read when the code isn't in the first level
 * of the joint table, jump into the 2nd level of the individual table. */
#define READ_2PIX(dst0, dst1, plane1){\
    uint16_t code = get_vlc2(&s->gb, s->vlc[3+plane1].table, VLC_BITS, 1);\
    if(code != 0xffff){\
        dst0 = code>>8;\
        dst1 = code;\
    }else{\
        dst0 = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);\
        dst1 = get_vlc2(&s->gb, s->vlc[plane1].table, VLC_BITS, 3);\
    }\
}

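/* Any single code is shorter than 32 bits, so one READ_2PIX consumes at most
 * 2*31 bits. When the remaining input might not cover that worst case, the
 * slower loop variant re-checks the bit position on every iteration instead
 * of trusting the pixel count alone. */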
static void decode_422_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*4)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[1][i], 1);
            READ_2PIX(s->temp[0][2*i+1], s->temp[2][i], 2);
        }
    }
}

static void decode_gray_bitstream(HYuvContext *s, int count){
    int i;

    count/=2;

    if(count >= (get_bits_left(&s->gb))/(31*2)){
        for(i=0; i<count && get_bits_count(&s->gb) < s->gb.size_in_bits; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }else{
        for(i=0; i<count; i++){
            READ_2PIX(s->temp[0][2*i  ], s->temp[0][2*i+1], 0);
        }
    }
}

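/* Encoder-side bitstream writers: the residuals to code are already in
 * s->temp[]. With CODEC_FLAG_PASS1 the statistics are gathered first and
 * CODEC_FLAG2_NO_OUTPUT then skips the actual write; in context mode the
 * statistics are updated while writing so the per-frame tables can adapt. */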
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_422_bitstream(HYuvContext *s, int offset, int count){
    int i;
    const uint8_t *y = s->temp[0] + offset;
    const uint8_t *u = s->temp[1] + offset/2;
    const uint8_t *v = s->temp[2] + offset/2;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 2*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD4\
            int y0 = y[2*i];\
            int y1 = y[2*i+1];\
            int u0 = u[i];\
            int v0 = v[i];

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            s->stats[1][u0]++;
            s->stats[0][y1]++;
            s->stats[2][v0]++;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;
    if(s->context){
        for(i=0; i<count; i++){
            LOAD4;
            s->stats[0][y0]++;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            s->stats[1][u0]++;
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            s->stats[0][y1]++;
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            s->stats[2][v0]++;
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }else{
        for(i=0; i<count; i++){
            LOAD4;
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);
            put_bits(&s->pb, s->len[1][u0], s->bits[1][u0]);
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);
            put_bits(&s->pb, s->len[2][v0], s->bits[2][v0]);
        }
    }
    return 0;
}

static int encode_gray_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD2\
            int y0 = s->temp[0][2*i];\
            int y1 = s->temp[0][2*i+1];
#define STAT2\
            s->stats[0][y0]++;\
            s->stats[0][y1]++;
#define WRITE2\
            put_bits(&s->pb, s->len[0][y0], s->bits[0][y0]);\
            put_bits(&s->pb, s->len[0][y1], s->bits[0][y1]);

    count/=2;
    if(s->flags&CODEC_FLAG_PASS1){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
        }
    }
    if(s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)
        return 0;

    if(s->context){
        for(i=0; i<count; i++){
            LOAD2;
            STAT2;
            WRITE2;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD2;
            WRITE2;
        }
    }
    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

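/* Decode `count` BGR(A) pixels: the joint table maps one code directly to a
 * packed pixel via pix_bgr_map; a return of -1 falls back to per-channel
 * reads, undoing the green decorrelation when it is in use. */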
static av_always_inline void decode_bgr_1(HYuvContext *s, int count, int decorrelate, int alpha){
    int i;
    for(i=0; i<count; i++){
        int code = get_vlc2(&s->gb, s->vlc[3].table, VLC_BITS, 1);
        if(code != -1){
            *(uint32_t*)&s->temp[0][4*i] = s->pix_bgr_map[code];
        }else if(decorrelate){
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3) + s->temp[0][4*i+G];
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3) + s->temp[0][4*i+G];
        }else{
            s->temp[0][4*i+B] = get_vlc2(&s->gb, s->vlc[0].table, VLC_BITS, 3);
            s->temp[0][4*i+G] = get_vlc2(&s->gb, s->vlc[1].table, VLC_BITS, 3);
            s->temp[0][4*i+R] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
        }
        if(alpha)
            s->temp[0][4*i+A] = get_vlc2(&s->gb, s->vlc[2].table, VLC_BITS, 3);
    }
}

static void decode_bgr_bitstream(HYuvContext *s, int count){
    if(s->decorrelate){
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 1, 0);
        else
            decode_bgr_1(s, count, 1, 1);
    }else{
        if(s->bitstream_bpp==24)
            decode_bgr_1(s, count, 0, 0);
        else
            decode_bgr_1(s, count, 0, 1);
    }
}

static int encode_bgr_bitstream(HYuvContext *s, int count){
    int i;

    if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3*4*count){
        av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

#define LOAD3\
            int g= s->temp[0][4*i+G];\
            int b= (s->temp[0][4*i+B] - g) & 0xff;\
            int r= (s->temp[0][4*i+R] - g) & 0xff;
#define STAT3\
            s->stats[0][b]++;\
            s->stats[1][g]++;\
            s->stats[2][r]++;
#define WRITE3\
            put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
            put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
            put_bits(&s->pb, s->len[2][r], s->bits[2][r]);

    if((s->flags&CODEC_FLAG_PASS1) && (s->avctx->flags2&CODEC_FLAG2_NO_OUTPUT)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
        }
    }else if(s->context || (s->flags&CODEC_FLAG_PASS1)){
        for(i=0; i<count; i++){
            LOAD3;
            STAT3;
            WRITE3;
        }
    }else{
        for(i=0; i<count; i++){
            LOAD3;
            WRITE3;
        }
    }
    return 0;
}

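/* Report the rows finished since the last call to the user's draw_horiz_band
 * callback (if one is set), adjusting the chroma offset for 4:2:0 content. */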
#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static void draw_slice(HYuvContext *s, int y){
    int h, cy;
    int offset[4];

    if(s->avctx->draw_horiz_band==NULL)
        return;

    h= y - s->last_slice_end;
    y -= h;

    if(s->bitstream_bpp==12){
        cy= y>>1;
    }else{
        cy= y;
    }

    offset[0] = s->picture.linesize[0]*y;
    offset[1] = s->picture.linesize[1]*cy;
    offset[2] = s->picture.linesize[2]*cy;
    offset[3] = 0;
    emms_c();

    s->avctx->draw_horiz_band(s->avctx, &s->picture, offset, y, 3, h);

    s->last_slice_end= y + h;
}

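/* Frame decoding: the input is byteswapped into a padded buffer (huffyuv
 * packs its bitstream into 32-bit little-endian words), per-frame Huffman
 * tables are read first in context mode, and the frame is then reconstructed
 * line by line according to the predictor; RGB32 frames are stored bottom-up. */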
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *avpkt){
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    HYuvContext *s = avctx->priv_data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    int fake_ystride, fake_ustride, fake_vstride;
    AVFrame * const p= &s->picture;
    int table_size= 0;

    AVFrame *picture = data;

    av_fast_malloc(&s->bitstream_buffer, &s->bitstream_buffer_size, buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->bitstream_buffer)
        return AVERROR(ENOMEM);

    memset(s->bitstream_buffer + buf_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    s->dsp.bswap_buf((uint32_t*)s->bitstream_buffer, (const uint32_t*)buf, buf_size/4);

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    if(s->context){
        table_size = read_huffman_tables(s, s->bitstream_buffer, buf_size);
        if(table_size < 0)
            return -1;
    }

    if((unsigned)(buf_size-table_size) >= INT_MAX/8)
        return -1;

    init_get_bits(&s->gb, s->bitstream_buffer+table_size, (buf_size-table_size)*8);

    fake_ystride= s->interlaced ? p->linesize[0]*2 : p->linesize[0];
    fake_ustride= s->interlaced ? p->linesize[1]*2 : p->linesize[1];
    fake_vstride= s->interlaced ? p->linesize[2]*2 : p->linesize[2];

    s->last_slice_end= 0;

    if(s->bitstream_bpp<24){
        int y, cy;
        int lefty, leftu, leftv;
        int lefttopy, lefttopu, lefttopv;

        if(s->yuy2){
            p->data[0][3]= get_bits(&s->gb, 8);
            p->data[0][2]= get_bits(&s->gb, 8);
            p->data[0][1]= get_bits(&s->gb, 8);
            p->data[0][0]= get_bits(&s->gb, 8);

            av_log(avctx, AV_LOG_ERROR, "YUY2 output is not implemented yet\n");
            return -1;
        }else{

            leftv= p->data[2][0]= get_bits(&s->gb, 8);
            lefty= p->data[0][1]= get_bits(&s->gb, 8);
            leftu= p->data[1][0]= get_bits(&s->gb, 8);
                   p->data[0][0]= get_bits(&s->gb, 8);

            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                for(cy=y=1; y<s->height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        decode_gray_bitstream(s, width);

                        ydst= p->data[0] + p->linesize[0]*y;

                        lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                        if(s->predictor == PLANE){
                            if(y>s->interlaced)
                                s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                        }
                        y++;
                        if(y>=s->height) break;
                    }

                    draw_slice(s, y);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(ydst, s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(udst, s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(vdst, s->temp[2], width2, leftv);
                    }
                    if(s->predictor == PLANE){
                        if(cy>s->interlaced){
                            s->dsp.add_bytes(ydst, ydst - fake_ystride, width);
                            if(!(s->flags&CODEC_FLAG_GRAY)){
                                s->dsp.add_bytes(udst, udst - fake_ustride, width2);
                                s->dsp.add_bytes(vdst, vdst - fake_vstride, width2);
                            }
                        }
                    }
                }
                draw_slice(s, height);

                break;
            case MEDIAN:
                /* first line except first 2 pixels is left predicted */
                decode_422_bitstream(s, width-2);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + 2, s->temp[0], width-2, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + 1, s->temp[1], width2-1, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + 1, s->temp[2], width2-1, leftv);
                }

                cy=y=1;

                /* second line is left predicted for interlaced case */
                if(s->interlaced){
                    decode_422_bitstream(s, width);
                    lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + p->linesize[0], s->temp[0], width, lefty);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + p->linesize[2], s->temp[1], width2, leftu);
                        leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + p->linesize[1], s->temp[2], width2, leftv);
                    }
                    y++; cy++;
                }

                /* next 4 pixels are left predicted too */
                decode_422_bitstream(s, 4);
                lefty= s->dsp.add_hfyu_left_prediction(p->data[0] + fake_ystride, s->temp[0], 4, lefty);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    leftu= s->dsp.add_hfyu_left_prediction(p->data[1] + fake_ustride, s->temp[1], 2, leftu);
                    leftv= s->dsp.add_hfyu_left_prediction(p->data[2] + fake_vstride, s->temp[2], 2, leftv);
                }

                /* next line except the first 4 pixels is median predicted */
                lefttopy= p->data[0][3];
                decode_422_bitstream(s, width-4);
                s->dsp.add_hfyu_median_prediction(p->data[0] + fake_ystride+4, p->data[0]+4, s->temp[0], width-4, &lefty, &lefttopy);
                if(!(s->flags&CODEC_FLAG_GRAY)){
                    lefttopu= p->data[1][1];
                    lefttopv= p->data[2][1];
                    s->dsp.add_hfyu_median_prediction(p->data[1] + fake_ustride+2, p->data[1]+2, s->temp[1], width2-2, &leftu, &lefttopu);
                    s->dsp.add_hfyu_median_prediction(p->data[2] + fake_vstride+2, p->data[2]+2, s->temp[2], width2-2, &leftv, &lefttopv);
                }
                y++; cy++;

                for(; y<height; y++,cy++){
                    uint8_t *ydst, *udst, *vdst;

                    if(s->bitstream_bpp==12){
                        while(2*cy > y){
                            decode_gray_bitstream(s, width);
                            ydst= p->data[0] + p->linesize[0]*y;
                            s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                            y++;
                        }
                        if(y>=height) break;
                    }
                    draw_slice(s, y);

                    decode_422_bitstream(s, width);

                    ydst= p->data[0] + p->linesize[0]*y;
                    udst= p->data[1] + p->linesize[1]*cy;
                    vdst= p->data[2] + p->linesize[2]*cy;

                    s->dsp.add_hfyu_median_prediction(ydst, ydst - fake_ystride, s->temp[0], width, &lefty, &lefttopy);
                    if(!(s->flags&CODEC_FLAG_GRAY)){
                        s->dsp.add_hfyu_median_prediction(udst, udst - fake_ustride, s->temp[1], width2, &leftu, &lefttopu);
                        s->dsp.add_hfyu_median_prediction(vdst, vdst - fake_vstride, s->temp[2], width2, &leftv, &lefttopv);
                    }
                }

                draw_slice(s, height);
                break;
            }
        }
    }else{
        int y;
        int leftr, leftg, leftb, lefta;
        const int last_line= (height-1)*p->linesize[0];

        if(s->bitstream_bpp==32){
            lefta= p->data[0][last_line+A]= get_bits(&s->gb, 8);
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
        }else{
            leftr= p->data[0][last_line+R]= get_bits(&s->gb, 8);
            leftg= p->data[0][last_line+G]= get_bits(&s->gb, 8);
            leftb= p->data[0][last_line+B]= get_bits(&s->gb, 8);
            lefta= p->data[0][last_line+A]= 255;
            skip_bits(&s->gb, 8);
        }

        if(s->bgr32){
            switch(s->predictor){
            case LEFT:
            case PLANE:
                decode_bgr_bitstream(s, width-1);
                s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + last_line+4, s->temp[0], width-1, &leftr, &leftg, &leftb, &lefta);

                for(y=s->height-2; y>=0; y--){ //Yes it is stored upside down.
                    decode_bgr_bitstream(s, width);

                    s->dsp.add_hfyu_left_prediction_bgr32(p->data[0] + p->linesize[0]*y, s->temp[0], width, &leftr, &leftg, &leftb, &lefta);
                    if(s->predictor == PLANE){
                        if(s->bitstream_bpp!=32) lefta=0;
                        if((y&s->interlaced)==0 && y<s->height-1-s->interlaced){
                            s->dsp.add_bytes(p->data[0] + p->linesize[0]*y,
                                             p->data[0] + p->linesize[0]*y + fake_ystride, fake_ystride);
                        }
                    }
                }
                draw_slice(s, height); // just 1 large slice as this is not possible in reverse order
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "prediction type not supported!\n");
            }
        }else{

            av_log(avctx, AV_LOG_ERROR, "BGR24 output is not implemented yet\n");
            return -1;
        }
    }
    emms_c();

    *picture= *p;
    *data_size = sizeof(AVFrame);

    return (get_bits_count(&s->gb)+31)/32*4 + table_size;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

static int common_end(HYuvContext *s){
    int i;

    for(i=0; i<3; i++){
        av_freep(&s->temp[i]);
    }
    return 0;
}

#if CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER
static av_cold int decode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;
    int i;

    if (s->picture.data[0])
        avctx->release_buffer(avctx, &s->picture);

    common_end(s);
    av_freep(&s->bitstream_buffer);

    for(i=0; i<6; i++){
        free_vlc(&s->vlc[i]);
    }

    return 0;
}
#endif /* CONFIG_HUFFYUV_DECODER || CONFIG_FFVHUFF_DECODER */

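/* Frame encoding mirrors decode_frame(): in context mode the adaptive tables
 * are stored (and the statistics halved) at the start of each frame, the
 * first samples are written raw, each line is predicted and entropy coded,
 * and the finished bitstream is byteswapped in 32-bit words at the end. */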
#if CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER
static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data){
    HYuvContext *s = avctx->priv_data;
    AVFrame *pict = data;
    const int width= s->width;
    const int width2= s->width>>1;
    const int height= s->height;
    const int fake_ystride= s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
    const int fake_ustride= s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
    const int fake_vstride= s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
    AVFrame * const p= &s->picture;
    int i, j, size=0;

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    if(s->context){
        for(i=0; i<3; i++){
            generate_len_table(s->len[i], s->stats[i]);
            if(generate_bits_table(s->bits[i], s->len[i])<0)
                return -1;
            size+= store_table(s, s->len[i], &buf[size]);
        }

        for(i=0; i<3; i++)
            for(j=0; j<256; j++)
                s->stats[i][j] >>= 1;
    }

    init_put_bits(&s->pb, buf+size, buf_size-size);

    if(avctx->pix_fmt == PIX_FMT_YUV422P || avctx->pix_fmt == PIX_FMT_YUV420P){
        int lefty, leftu, leftv, y, cy;

        put_bits(&s->pb, 8, leftv= p->data[2][0]);
        put_bits(&s->pb, 8, lefty= p->data[0][1]);
        put_bits(&s->pb, 8, leftu= p->data[1][0]);
        put_bits(&s->pb, 8,        p->data[0][0]);

        lefty= sub_left_prediction(s, s->temp[0], p->data[0], width , 0);
        leftu= sub_left_prediction(s, s->temp[1], p->data[1], width2, 0);
        leftv= sub_left_prediction(s, s->temp[2], p->data[2], width2, 0);

        encode_422_bitstream(s, 2, width-2);

        if(s->predictor==MEDIAN){
            int lefttopy, lefttopu, lefttopv;
            cy=y=1;
            if(s->interlaced){
                lefty= sub_left_prediction(s, s->temp[0], p->data[0]+p->linesize[0], width , lefty);
                leftu= sub_left_prediction(s, s->temp[1], p->data[1]+p->linesize[1], width2, leftu);
                leftv= sub_left_prediction(s, s->temp[2], p->data[2]+p->linesize[2], width2, leftv);

                encode_422_bitstream(s, 0, width);
                y++; cy++;
            }

            lefty= sub_left_prediction(s, s->temp[0], p->data[0]+fake_ystride, 4, lefty);
            leftu= sub_left_prediction(s, s->temp[1], p->data[1]+fake_ustride, 2, leftu);
            leftv= sub_left_prediction(s, s->temp[2], p->data[2]+fake_vstride, 2, leftv);

            encode_422_bitstream(s, 0, 4);

            lefttopy= p->data[0][3];
            lefttopu= p->data[1][1];
            lefttopv= p->data[2][1];
            s->dsp.sub_hfyu_median_prediction(s->temp[0], p->data[0]+4, p->data[0] + fake_ystride+4, width-4 , &lefty, &lefttopy);
            s->dsp.sub_hfyu_median_prediction(s->temp[1], p->data[1]+2, p->data[1] + fake_ustride+2, width2-2, &leftu, &lefttopu);
            s->dsp.sub_hfyu_median_prediction(s->temp[2], p->data[2]+2, p->data[2] + fake_vstride+2, width2-2, &leftv, &lefttopv);
            encode_422_bitstream(s, 0, width-4);
            y++; cy++;

            for(; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                if(s->bitstream_bpp==12){
                    while(2*cy > y){
                        ydst= p->data[0] + p->linesize[0]*y;
                        s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                        encode_gray_bitstream(s, width);
                        y++;
                    }
                    if(y>=height) break;
                }
                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                s->dsp.sub_hfyu_median_prediction(s->temp[0], ydst - fake_ystride, ydst, width , &lefty, &lefttopy);
                s->dsp.sub_hfyu_median_prediction(s->temp[1], udst - fake_ustride, udst, width2, &leftu, &lefttopu);
                s->dsp.sub_hfyu_median_prediction(s->temp[2], vdst - fake_vstride, vdst, width2, &leftv, &lefttopv);

                encode_422_bitstream(s, 0, width);
            }
        }else{
            for(cy=y=1; y<height; y++,cy++){
                uint8_t *ydst, *udst, *vdst;

                /* encode a luma only line & y++ */
                if(s->bitstream_bpp==12){
                    ydst= p->data[0] + p->linesize[0]*y;

                    if(s->predictor == PLANE && s->interlaced < y){
                        s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);

                        lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    }else{
                        lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    }
                    encode_gray_bitstream(s, width);
                    y++;
                    if(y>=height) break;
                }

                ydst= p->data[0] + p->linesize[0]*y;
                udst= p->data[1] + p->linesize[1]*cy;
                vdst= p->data[2] + p->linesize[2]*cy;

                if(s->predictor == PLANE && s->interlaced < cy){
                    s->dsp.diff_bytes(s->temp[1], ydst, ydst - fake_ystride, width);
                    s->dsp.diff_bytes(s->temp[2], udst, udst - fake_ustride, width2);
                    s->dsp.diff_bytes(s->temp[2] + width2, vdst, vdst - fake_vstride, width2);

                    lefty= sub_left_prediction(s, s->temp[0], s->temp[1], width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], s->temp[2], width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], s->temp[2] + width2, width2, leftv);
                }else{
                    lefty= sub_left_prediction(s, s->temp[0], ydst, width , lefty);
                    leftu= sub_left_prediction(s, s->temp[1], udst, width2, leftu);
                    leftv= sub_left_prediction(s, s->temp[2], vdst, width2, leftv);
                }

                encode_422_bitstream(s, 0, width);
            }
        }
    }else if(avctx->pix_fmt == PIX_FMT_RGB32){
        uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
        const int stride = -p->linesize[0];
        const int fake_stride = -fake_ystride;
        int y;
        int leftr, leftg, leftb;

        put_bits(&s->pb, 8, leftr= data[R]);
        put_bits(&s->pb, 8, leftg= data[G]);
        put_bits(&s->pb, 8, leftb= data[B]);
        put_bits(&s->pb, 8, 0);

        sub_left_prediction_bgr32(s, s->temp[0], data+4, width-1, &leftr, &leftg, &leftb);
        encode_bgr_bitstream(s, width-1);

        for(y=1; y<s->height; y++){
            uint8_t *dst = data + y*stride;
            if(s->predictor == PLANE && s->interlaced < y){
                s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*4);
                sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
            }else{
                sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
            }
            encode_bgr_bitstream(s, width);
        }
    }else{
        av_log(avctx, AV_LOG_ERROR, "Format not supported!\n");
    }
    emms_c();

    size+= (put_bits_count(&s->pb)+31)/8;
    put_bits(&s->pb, 16, 0);
    put_bits(&s->pb, 15, 0);
    size/= 4;

    if((s->flags&CODEC_FLAG_PASS1) && (s->picture_number&31)==0){
        int j;
        char *p= avctx->stats_out;
        char *end= p + 1024*30;
        for(i=0; i<3; i++){
            for(j=0; j<256; j++){
                snprintf(p, end-p, "%"PRIu64" ", s->stats[i][j]);
                p+= strlen(p);
                s->stats[i][j]= 0;
            }
            snprintf(p, end-p, "\n");
            p++;
        }
    } else
        avctx->stats_out[0] = '\0';
    if(!(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)){
        flush_put_bits(&s->pb);
        s->dsp.bswap_buf((uint32_t*)buf, (uint32_t*)buf, size);
    }

    s->picture_number++;

    return size*4;
}

static av_cold int encode_end(AVCodecContext *avctx)
{
    HYuvContext *s = avctx->priv_data;

    common_end(s);

    av_freep(&avctx->extradata);
    av_freep(&avctx->stats_out);

    return 0;
}
#endif /* CONFIG_HUFFYUV_ENCODER || CONFIG_FFVHUFF_ENCODER */

#if CONFIG_HUFFYUV_DECODER
AVCodec ff_huffyuv_decoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_DECODER
AVCodec ff_ffvhuff_decoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    decode_init,
    NULL,
    decode_end,
    decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_DRAW_HORIZ_BAND,
    NULL,
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif

#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
    "huffyuv",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_HUFFYUV,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif

#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
    "ffvhuff",
    AVMEDIA_TYPE_VIDEO,
    CODEC_ID_FFVHUFF,
    sizeof(HYuvContext),
    encode_init,
    encode_frame,
    encode_end,
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_RGB32, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif