ffmpeg / libavcodec / pnm.c @ 5509bffa
/*
 * PNM image format
 * Copyright (c) 2002, 2003 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "mpegvideo.h" //only for ParseContext

typedef struct PNMContext {
    uint8_t *bytestream;
    uint8_t *bytestream_start;
    uint8_t *bytestream_end;
    AVFrame picture;
} PNMContext;

static inline int pnm_space(int c)
{
    return (c == ' ' || c == '\n' || c == '\r' || c == '\t');
}

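/* Read the next whitespace-delimited token from the bytestream into str,
 * skipping leading whitespace and '#' comment lines; the result is
 * NUL-terminated and truncated to at most buf_size - 1 characters. */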
static void pnm_get(PNMContext *sc, char *str, int buf_size)
{
    char *s;
    int c;

    /* skip spaces and comments */
    for(;;) {
        c = *sc->bytestream++;
        if (c == '#') {
            do {
                c = *sc->bytestream++;
            } while (c != '\n' && sc->bytestream < sc->bytestream_end);
        } else if (!pnm_space(c)) {
            break;
        }
    }

    s = str;
    while (sc->bytestream < sc->bytestream_end && !pnm_space(c)) {
        if ((s - str) < buf_size - 1)
            *s++ = c;
        c = *sc->bytestream++;
    }
    *s = '\0';
}

static int common_init(AVCodecContext *avctx){
    PNMContext *s = avctx->priv_data;

    avcodec_get_frame_defaults((AVFrame*)&s->picture);
    avctx->coded_frame= (AVFrame*)&s->picture;

    return 0;
}

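/* Parse a PNM/PAM header: "P4" (bitmap), "P5" (greyscale, or YUV420P for the
 * pgmyuv pseudo-format), "P6" (RGB) and "P7" (PAM with WIDTH/HEIGHT/DEPTH/
 * MAXVAL/TUPLETYPE fields). Sets avctx->width, avctx->height and
 * avctx->pix_fmt; returns 0 on success, -1 on a malformed header. */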
static int pnm_decode_header(AVCodecContext *avctx, PNMContext * const s){
    char buf1[32], tuple_type[32];
    int h, w, depth, maxval;

    pnm_get(s, buf1, sizeof(buf1));
    if (!strcmp(buf1, "P4")) {
        avctx->pix_fmt = PIX_FMT_MONOWHITE;
    } else if (!strcmp(buf1, "P5")) {
        if (avctx->codec_id == CODEC_ID_PGMYUV)
            avctx->pix_fmt = PIX_FMT_YUV420P;
        else
            avctx->pix_fmt = PIX_FMT_GRAY8;
    } else if (!strcmp(buf1, "P6")) {
        avctx->pix_fmt = PIX_FMT_RGB24;
    } else if (!strcmp(buf1, "P7")) {
        w = -1;
        h = -1;
        maxval = -1;
        depth = -1;
        tuple_type[0] = '\0';
        for(;;) {
            pnm_get(s, buf1, sizeof(buf1));
            if (!strcmp(buf1, "WIDTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                w = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "HEIGHT")) {
                pnm_get(s, buf1, sizeof(buf1));
                h = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "DEPTH")) {
                pnm_get(s, buf1, sizeof(buf1));
                depth = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "MAXVAL")) {
                pnm_get(s, buf1, sizeof(buf1));
                maxval = strtol(buf1, NULL, 10);
            } else if (!strcmp(buf1, "TUPLETYPE")) {
                pnm_get(s, tuple_type, sizeof(tuple_type));
            } else if (!strcmp(buf1, "ENDHDR")) {
                break;
            } else {
                return -1;
            }
        }
        /* check that all tags are present */
        if (w <= 0 || h <= 0 || maxval <= 0 || depth <= 0 || tuple_type[0] == '\0' || avcodec_check_dimensions(avctx, w, h))
            return -1;

        avctx->width = w;
        avctx->height = h;
        if (depth == 1) {
            if (maxval == 1)
                avctx->pix_fmt = PIX_FMT_MONOWHITE;
            else
                avctx->pix_fmt = PIX_FMT_GRAY8;
        } else if (depth == 3) {
            avctx->pix_fmt = PIX_FMT_RGB24;
        } else if (depth == 4) {
            avctx->pix_fmt = PIX_FMT_RGBA32;
        } else {
            return -1;
        }
        return 0;
    } else {
        return -1;
    }
    pnm_get(s, buf1, sizeof(buf1));
    avctx->width = atoi(buf1);
    if (avctx->width <= 0)
        return -1;
    pnm_get(s, buf1, sizeof(buf1));
    avctx->height = atoi(buf1);
    if(avcodec_check_dimensions(avctx, avctx->width, avctx->height))
        return -1;
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        pnm_get(s, buf1, sizeof(buf1));
    }

    /* more check if YUV420 */
    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        if ((avctx->width & 1) != 0)
            return -1;
        h = (avctx->height * 2);
        if ((h % 3) != 0)
            return -1;
        h /= 3;
        avctx->height = h;
    }
    return 0;
}

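/* Decode one complete image: parse the header, allocate a frame via
 * get_buffer() and copy the raster line by line into the picture.
 * Returns the number of bytes consumed, or -1 on error. */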
static int pnm_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    PNMContext * const s = avctx->priv_data;
    AVFrame *picture = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, n, linesize, h;
    unsigned char *ptr;

    s->bytestream_start=
    s->bytestream= buf;
    s->bytestream_end= buf + buf_size;

    if(pnm_decode_header(avctx, s) < 0)
        return -1;

    if(p->data[0])
        avctx->release_buffer(avctx, p);

    p->reference= 0;
    if(avctx->get_buffer(avctx, p) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    switch(avctx->pix_fmt) {
    default:
        return -1;
    case PIX_FMT_RGB24:
        n = avctx->width * 3;
        goto do_read;
    case PIX_FMT_GRAY8:
        n = avctx->width;
        goto do_read;
    case PIX_FMT_MONOWHITE:
    case PIX_FMT_MONOBLACK:
        n = (avctx->width + 7) >> 3;
    do_read:
        ptr = p->data[0];
        linesize = p->linesize[0];
        if(s->bytestream + n*avctx->height > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            memcpy(ptr, s->bytestream, n);
            s->bytestream += n;
            ptr += linesize;
        }
        break;
    case PIX_FMT_YUV420P:
        {
            unsigned char *ptr1, *ptr2;

            n = avctx->width;
            ptr = p->data[0];
            linesize = p->linesize[0];
            if(s->bytestream + n*avctx->height*3/2 > s->bytestream_end)
                return -1;
            for(i = 0; i < avctx->height; i++) {
                memcpy(ptr, s->bytestream, n);
                s->bytestream += n;
                ptr += linesize;
            }
            ptr1 = p->data[1];
            ptr2 = p->data[2];
            n >>= 1;
            h = avctx->height >> 1;
            for(i = 0; i < h; i++) {
                memcpy(ptr1, s->bytestream, n);
                s->bytestream += n;
                memcpy(ptr2, s->bytestream, n);
                s->bytestream += n;
                ptr1 += p->linesize[1];
                ptr2 += p->linesize[2];
            }
        }
        break;
    case PIX_FMT_RGBA32:
        ptr = p->data[0];
        linesize = p->linesize[0];
        if(s->bytestream + avctx->width*avctx->height*4 > s->bytestream_end)
            return -1;
        for(i = 0; i < avctx->height; i++) {
            int j, r, g, b, a;

            for(j = 0;j < avctx->width; j++) {
                r = *s->bytestream++;
                g = *s->bytestream++;
                b = *s->bytestream++;
                a = *s->bytestream++;
                ((uint32_t *)ptr)[j] = (a << 24) | (r << 16) | (g << 8) | b;
            }
            ptr += linesize;
        }
        break;
    }
    *picture= *(AVFrame*)&s->picture;
    *data_size = sizeof(AVPicture);

    return s->bytestream - s->bytestream_start;
}

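/* Write a PBM/PGM/PPM header ("P4"/"P5"/"P6") followed by the raw raster;
 * for pgmyuv the luma and both chroma planes are stored as a single
 * greyscale image of height * 3 / 2 lines. Returns the encoded size. */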
static int pnm_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, h1, c, n, linesize;
    uint8_t *ptr, *ptr1, *ptr2;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    h1 = h;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        c = '4';
        n = (avctx->width + 7) >> 3;
        break;
    case PIX_FMT_GRAY8:
        c = '5';
        n = avctx->width;
        break;
    case PIX_FMT_RGB24:
        c = '6';
        n = avctx->width * 3;
        break;
    case PIX_FMT_YUV420P:
        c = '5';
        n = avctx->width;
        h1 = (h * 3) / 2;
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P%c\n%d %d\n",
             c, avctx->width, h1);
    s->bytestream += strlen(s->bytestream);
    if (avctx->pix_fmt != PIX_FMT_MONOWHITE) {
        snprintf(s->bytestream, s->bytestream_end - s->bytestream,
                 "%d\n", 255);
        s->bytestream += strlen(s->bytestream);
    }

    ptr = p->data[0];
    linesize = p->linesize[0];
    for(i=0;i<h;i++) {
        memcpy(s->bytestream, ptr, n);
        s->bytestream += n;
        ptr += linesize;
    }

    if (avctx->pix_fmt == PIX_FMT_YUV420P) {
        h >>= 1;
        n >>= 1;
        ptr1 = p->data[1];
        ptr2 = p->data[2];
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr1, n);
            s->bytestream += n;
            memcpy(s->bytestream, ptr2, n);
            s->bytestream += n;
            ptr1 += p->linesize[1];
            ptr2 += p->linesize[2];
        }
    }
    return s->bytestream - s->bytestream_start;
}

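/* Write a PAM ("P7") header and the raster; RGBA32 pixels are unpacked from
 * the packed 32-bit layout into R, G, B, A byte order, all other formats
 * are copied line by line. Returns the encoded size. */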
static int pam_encode_frame(AVCodecContext *avctx, unsigned char *outbuf, int buf_size, void *data){
    PNMContext *s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    int i, h, w, n, linesize, depth, maxval;
    const char *tuple_type;
    uint8_t *ptr;

    if(buf_size < avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height) + 200){
        av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
        return -1;
    }

    *p = *pict;
    p->pict_type= FF_I_TYPE;
    p->key_frame= 1;

    s->bytestream_start=
    s->bytestream= outbuf;
    s->bytestream_end= outbuf+buf_size;

    h = avctx->height;
    w = avctx->width;
    switch(avctx->pix_fmt) {
    case PIX_FMT_MONOWHITE:
        n = (w + 7) >> 3;
        depth = 1;
        maxval = 1;
        tuple_type = "BLACKANDWHITE";
        break;
    case PIX_FMT_GRAY8:
        n = w;
        depth = 1;
        maxval = 255;
        tuple_type = "GRAYSCALE";
        break;
    case PIX_FMT_RGB24:
        n = w * 3;
        depth = 3;
        maxval = 255;
        tuple_type = "RGB";
        break;
    case PIX_FMT_RGBA32:
        n = w * 4;
        depth = 4;
        maxval = 255;
        tuple_type = "RGB_ALPHA";
        break;
    default:
        return -1;
    }
    snprintf(s->bytestream, s->bytestream_end - s->bytestream,
             "P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\nTUPLETYPE %s\nENDHDR\n",
             w, h, depth, maxval, tuple_type);
    s->bytestream += strlen(s->bytestream);

    ptr = p->data[0];
    linesize = p->linesize[0];

    if (avctx->pix_fmt == PIX_FMT_RGBA32) {
        int j;
        unsigned int v;

        for(i=0;i<h;i++) {
            for(j=0;j<w;j++) {
                v = ((uint32_t *)ptr)[j];
                *s->bytestream++ = v >> 16;
                *s->bytestream++ = v >> 8;
                *s->bytestream++ = v;
                *s->bytestream++ = v >> 24;
            }
            ptr += linesize;
        }
    } else {
        for(i=0;i<h;i++) {
            memcpy(s->bytestream, ptr, n);
            s->bytestream += n;
            ptr += linesize;
        }
    }
    return s->bytestream - s->bytestream_start;
}

#if 0
static int pnm_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] >= '4' && p[1] <= '6' &&
        pnm_space(p[2]) )
        return AVPROBE_SCORE_MAX - 1; /* to permit pgmyuv probe */
    else
        return 0;
}

static int pgmyuv_probe(AVProbeData *pd)
{
    if (match_ext(pd->filename, "pgmyuv"))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int pam_probe(AVProbeData *pd)
{
    const char *p = pd->buf;
    if (pd->buf_size >= 8 &&
        p[0] == 'P' &&
        p[1] == '7' &&
        p[2] == '\n')
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
#endif

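/* Parser callback: locate the end of the next complete image by decoding its
 * header and adding the raster size from avpicture_get_size(), buffering
 * partial input with ff_combine_frame() until a whole frame is available. */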
static int pnm_parse(AVCodecParserContext *s,
                     AVCodecContext *avctx,
                     uint8_t **poutbuf, int *poutbuf_size,
                     const uint8_t *buf, int buf_size)
{
    ParseContext *pc = s->priv_data;
    PNMContext pnmctx;
    int next;

    for(; pc->overread>0; pc->overread--){
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
    }
retry:
    if(pc->index){
        pnmctx.bytestream_start=
        pnmctx.bytestream= pc->buffer;
        pnmctx.bytestream_end= pc->buffer + pc->index;
    }else{
        pnmctx.bytestream_start=
        pnmctx.bytestream= (uint8_t *) buf; /* casts avoid warnings */
        pnmctx.bytestream_end= (uint8_t *) buf + buf_size;
    }
    if(pnm_decode_header(avctx, &pnmctx) < 0){
        if(pnmctx.bytestream < pnmctx.bytestream_end){
            if(pc->index){
                pc->index=0;
            }else{
                buf++;
                buf_size--;
            }
            goto retry;
        }
#if 0
        if(pc->index && pc->index*2 + FF_INPUT_BUFFER_PADDING_SIZE < pc->buffer_size && buf_size > pc->index){
            memcpy(pc->buffer + pc->index, buf, pc->index);
            pc->index += pc->index;
            buf += pc->index;
            buf_size -= pc->index;
            goto retry;
        }
#endif
        next= END_NOT_FOUND;
    }else{
        next= pnmctx.bytestream - pnmctx.bytestream_start
            + avpicture_get_size(avctx->pix_fmt, avctx->width, avctx->height);
        if(pnmctx.bytestream_start!=buf)
            next-= pc->index;
        if(next > buf_size)
            next= END_NOT_FOUND;
    }

    if(ff_combine_frame(pc, next, (uint8_t **)&buf, &buf_size)<0){
        *poutbuf = NULL;
        *poutbuf_size = 0;
        return buf_size;
    }
    *poutbuf = (uint8_t *)buf;
    *poutbuf_size = buf_size;
    return next;
}

AVCodecParser pnm_parser = {
    { CODEC_ID_PGM, CODEC_ID_PGMYUV, CODEC_ID_PPM, CODEC_ID_PBM, CODEC_ID_PAM},
    sizeof(ParseContext),
    NULL,
    pnm_parse,
    ff_parse_close,
};

#ifdef CONFIG_PGM_ENCODER
AVCodec pgm_encoder = {
    "pgm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_GRAY8, -1},
};
#endif // CONFIG_PGM_ENCODER

#ifdef CONFIG_PGMYUV_ENCODER
AVCodec pgmyuv_encoder = {
    "pgmyuv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PGMYUV,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV420P, -1},
};
#endif // CONFIG_PGMYUV_ENCODER

#ifdef CONFIG_PPM_ENCODER
AVCodec ppm_encoder = {
    "ppm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PPM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, -1},
};
#endif // CONFIG_PPM_ENCODER

#ifdef CONFIG_PBM_ENCODER
AVCodec pbm_encoder = {
    "pbm",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PBM,
    sizeof(PNMContext),
    common_init,
    pnm_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PBM_ENCODER

#ifdef CONFIG_PAM_ENCODER
AVCodec pam_encoder = {
    "pam",
    CODEC_TYPE_VIDEO,
    CODEC_ID_PAM,
    sizeof(PNMContext),
    common_init,
    pam_encode_frame,
    NULL, //encode_end,
    pnm_decode_frame,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_RGB24, PIX_FMT_RGBA32, PIX_FMT_GRAY8, PIX_FMT_MONOWHITE, -1},
};
#endif // CONFIG_PAM_ENCODER