ffmpeg / libavcodec / utils.c @ d375c104
/*
 * utils for libavcodec
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * utils.
 */

#include "libavutil/avstring.h"
#include "libavutil/integer.h"
#include "libavutil/crc.h"
#include "libavutil/pixdesc.h"
#include "libavutil/audioconvert.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "avcodec.h"
#include "dsputil.h"
#include "libavutil/opt.h"
#include "imgconvert.h"
#include "thread.h"
#include "audioconvert.h"
#include "internal.h"
#include <stdlib.h>
#include <stdarg.h>
#include <limits.h>
#include <float.h>

static int volatile entangled_thread_counter=0;
static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
static void *codec_mutex;

void *av_fast_realloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size)
{
    if(min_size < *size)
        return ptr;

    min_size= FFMAX(17*min_size/16 + 32, min_size);

    ptr= av_realloc(ptr, min_size);
    if(!ptr) //we could set this to the unmodified min_size but this is safer if the user lost the ptr and uses NULL now
        min_size= 0;

    *size= min_size;

    return ptr;
}

void av_fast_malloc(void *ptr, unsigned int *size, FF_INTERNALC_MEM_TYPE min_size)
{
    void **p = ptr;
    if (min_size < *size)
        return;
    min_size= FFMAX(17*min_size/16 + 32, min_size);
    av_free(*p);
    *p = av_malloc(min_size);
    if (!*p) min_size = 0;
    *size= min_size;
}
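
/*
 * Illustrative usage sketch (not part of the original file): a caller that
 * grows a scratch buffer with av_fast_realloc(). The names scratch,
 * scratch_size and needed below are hypothetical.
 *
 *     static uint8_t *scratch = NULL;
 *     static unsigned int scratch_size = 0;
 *
 *     scratch = av_fast_realloc(scratch, &scratch_size, needed);
 *     if (!scratch)
 *         return AVERROR(ENOMEM);   // on failure scratch_size has been reset to 0
 *
 * av_fast_malloc() follows the same pattern but takes the address of the
 * pointer and does not preserve the previous contents.
 */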

/* encoder management */
static AVCodec *first_avcodec = NULL;

AVCodec *av_codec_next(AVCodec *c){
    if(c) return c->next;
    else  return first_avcodec;
}

void avcodec_register(AVCodec *codec)
{
    AVCodec **p;
    avcodec_init();
    p = &first_avcodec;
    while (*p != NULL) p = &(*p)->next;
    *p = codec;
    codec->next = NULL;
}

#if LIBAVCODEC_VERSION_MAJOR < 53
void register_avcodec(AVCodec *codec)
{
    avcodec_register(codec);
}
#endif

unsigned avcodec_get_edge_width(void)
{
    return EDGE_WIDTH;
}

void avcodec_set_dimensions(AVCodecContext *s, int width, int height){
    s->coded_width = width;
    s->coded_height= height;
    s->width = -((-width )>>s->lowres);
    s->height= -((-height)>>s->lowres);
}
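
/*
 * Added note (not in the original file): -((-x) >> lowres) is a round-up
 * division by 2^lowres. For example, with lowres == 1 a coded width of 1920
 * gives a display width of 960, while 1921 gives 961 rather than the
 * truncated 960.
 */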

typedef struct InternalBuffer{
    int last_pic_num;
    uint8_t *base[4];
    uint8_t *data[4];
    int linesize[4];
    int width, height;
    enum PixelFormat pix_fmt;
}InternalBuffer;

#define INTERNAL_BUFFER_SIZE 32

void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[4]){
    int w_align= 1;
    int h_align= 1;

    switch(s->pix_fmt){
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_UYVY422:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV440P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_GRAY8:
    case PIX_FMT_GRAY16BE:
    case PIX_FMT_GRAY16LE:
    case PIX_FMT_YUVJ420P:
    case PIX_FMT_YUVJ422P:
    case PIX_FMT_YUVJ440P:
    case PIX_FMT_YUVJ444P:
    case PIX_FMT_YUVA420P:
        w_align= 16; //FIXME check for non mpeg style codecs and use less alignment
        h_align= 16;
        if(s->codec_id == CODEC_ID_MPEG2VIDEO || s->codec_id == CODEC_ID_MJPEG || s->codec_id == CODEC_ID_AMV || s->codec_id == CODEC_ID_THP || s->codec_id == CODEC_ID_H264)
            h_align= 32; // interlaced is rounded up to 2 MBs
        break;
    case PIX_FMT_YUV411P:
    case PIX_FMT_UYYVYY411:
        w_align=32;
        h_align=8;
        break;
    case PIX_FMT_YUV410P:
        if(s->codec_id == CODEC_ID_SVQ1){
            w_align=64;
            h_align=64;
        }
    case PIX_FMT_RGB555:
        if(s->codec_id == CODEC_ID_RPZA){
            w_align=4;
            h_align=4;
        }
    case PIX_FMT_PAL8:
    case PIX_FMT_BGR8:
    case PIX_FMT_RGB8:
        if(s->codec_id == CODEC_ID_SMC){
            w_align=4;
            h_align=4;
        }
        break;
    case PIX_FMT_BGR24:
        if((s->codec_id == CODEC_ID_MSZH) || (s->codec_id == CODEC_ID_ZLIB)){
            w_align=4;
            h_align=4;
        }
        break;
    default:
        w_align= 1;
        h_align= 1;
        break;
    }

    *width = FFALIGN(*width , w_align);
    *height= FFALIGN(*height, h_align);
    if(s->codec_id == CODEC_ID_H264 || s->lowres)
        *height+=2; // some of the optimized chroma MC reads one line too much
                    // which is also done in mpeg decoders with lowres > 0

    linesize_align[0] =
    linesize_align[1] =
    linesize_align[2] =
    linesize_align[3] = STRIDE_ALIGN;
//STRIDE_ALIGN is 8 for SSE* but this does not work for SVQ1 chroma planes
//we could change STRIDE_ALIGN to 16 for x86/sse but it would increase the
//picture size unnecessarily in some cases. The solution here is not
//pretty and better ideas are welcome!
#if HAVE_MMX
    if(s->codec_id == CODEC_ID_SVQ1 || s->codec_id == CODEC_ID_VP5 ||
       s->codec_id == CODEC_ID_VP6 || s->codec_id == CODEC_ID_VP6F ||
       s->codec_id == CODEC_ID_VP6A) {
        linesize_align[0] =
        linesize_align[1] =
        linesize_align[2] = 16;
    }
#endif
}

void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){
    int chroma_shift = av_pix_fmt_descriptors[s->pix_fmt].log2_chroma_w;
    int linesize_align[4];
    int align;
    avcodec_align_dimensions2(s, width, height, linesize_align);
    align = FFMAX(linesize_align[0], linesize_align[3]);
    linesize_align[1] <<= chroma_shift;
    linesize_align[2] <<= chroma_shift;
    align = FFMAX3(align, linesize_align[1], linesize_align[2]);
    *width=FFALIGN(*width, align);
}
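
/*
 * Illustrative sketch (not part of the original file): how an application's
 * custom get_buffer() might use avcodec_align_dimensions2() before allocating
 * its own planes. The locals w, h and stride_align are hypothetical.
 *
 *     int w = avctx->width, h = avctx->height;
 *     int stride_align[4];
 *
 *     avcodec_align_dimensions2(avctx, &w, &h, stride_align);
 *     if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
 *         w += EDGE_WIDTH * 2;
 *         h += EDGE_WIDTH * 2;
 *     }
 *     // allocate planes of at least w x h, each linesize a multiple of
 *     // stride_align[plane], as avcodec_default_get_buffer() does below
 */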

#if LIBAVCODEC_VERSION_MAJOR < 53
int avcodec_check_dimensions(void *av_log_ctx, unsigned int w, unsigned int h){
    return av_image_check_size(w, h, 0, av_log_ctx);
}
#endif

int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){
    int i;
    int w= s->width;
    int h= s->height;
    InternalBuffer *buf;
    int *picture_number;

    if(pic->data[0]!=NULL) {
        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }
    if(s->internal_buffer_count >= INTERNAL_BUFFER_SIZE) {
        av_log(s, AV_LOG_ERROR, "internal_buffer_count overflow (missing release_buffer?)\n");
        return -1;
    }

    if(av_image_check_size(w, h, 0, s))
        return -1;

    if(s->internal_buffer==NULL){
        s->internal_buffer= av_mallocz((INTERNAL_BUFFER_SIZE+1)*sizeof(InternalBuffer));
    }
#if 0
    s->internal_buffer= av_fast_realloc(
        s->internal_buffer,
        &s->internal_buffer_size,
        sizeof(InternalBuffer)*FFMAX(99, s->internal_buffer_count+1)/*FIXME*/
        );
#endif

    buf= &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count];
    picture_number= &(((InternalBuffer*)s->internal_buffer)[INTERNAL_BUFFER_SIZE]).last_pic_num; //FIXME ugly hack
    (*picture_number)++;

    if(buf->base[0] && (buf->width != w || buf->height != h || buf->pix_fmt != s->pix_fmt)){
        if(s->active_thread_type&FF_THREAD_FRAME) {
            av_log_missing_feature(s, "Width/height changing with frame threads is", 0);
            return -1;
        }

        for(i=0; i<4; i++){
            av_freep(&buf->base[i]);
            buf->data[i]= NULL;
        }
    }

    if(buf->base[0]){
        pic->age= *picture_number - buf->last_pic_num;
        buf->last_pic_num= *picture_number;
    }else{
        int h_chroma_shift, v_chroma_shift;
        int size[4] = {0};
        int tmpsize;
        int unaligned;
        AVPicture picture;
        int stride_align[4];

        avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);

        avcodec_align_dimensions2(s, &w, &h, stride_align);

        if(!(s->flags&CODEC_FLAG_EMU_EDGE)){
            w+= EDGE_WIDTH*2;
            h+= EDGE_WIDTH*2;
        }

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(picture.linesize, s->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w-1);

            unaligned = 0;
            for (i=0; i<4; i++){
                unaligned |= picture.linesize[i] % stride_align[i];
            }
        } while (unaligned);

        tmpsize = av_image_fill_pointers(picture.data, s->pix_fmt, h, NULL, picture.linesize);
        if (tmpsize < 0)
            return -1;

        for (i=0; i<3 && picture.data[i+1]; i++)
            size[i] = picture.data[i+1] - picture.data[i];
        size[i] = tmpsize - (picture.data[i] - picture.data[0]);

        buf->last_pic_num= -256*256*256*64;
        memset(buf->base, 0, sizeof(buf->base));
        memset(buf->data, 0, sizeof(buf->data));

        for(i=0; i<4 && size[i]; i++){
            const int h_shift= i==0 ? 0 : h_chroma_shift;
            const int v_shift= i==0 ? 0 : v_chroma_shift;

            buf->linesize[i]= picture.linesize[i];

            buf->base[i]= av_malloc(size[i]+16); //FIXME 16
            if(buf->base[i]==NULL) return -1;
            memset(buf->base[i], 128, size[i]);

            // no edge if EDGE EMU or not planar YUV
            if((s->flags&CODEC_FLAG_EMU_EDGE) || !size[2])
                buf->data[i] = buf->base[i];
            else
                buf->data[i] = buf->base[i] + FFALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), stride_align[i]);
        }
        if(size[1] && !size[2])
            ff_set_systematic_pal2((uint32_t*)buf->data[1], s->pix_fmt);
        buf->width  = s->width;
        buf->height = s->height;
        buf->pix_fmt= s->pix_fmt;
        pic->age= 256*256*256*64;
    }
    pic->type= FF_BUFFER_TYPE_INTERNAL;

    for(i=0; i<4; i++){
        pic->base[i]= buf->base[i];
        pic->data[i]= buf->data[i];
        pic->linesize[i]= buf->linesize[i];
    }
    s->internal_buffer_count++;

    if(s->pkt) pic->pkt_pts= s->pkt->pts;
    else       pic->pkt_pts= AV_NOPTS_VALUE;
    pic->reordered_opaque= s->reordered_opaque;

    if(s->debug&FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count);

    return 0;
}

void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){
    int i;
    InternalBuffer *buf, *last;

    assert(pic->type==FF_BUFFER_TYPE_INTERNAL);
    assert(s->internal_buffer_count);

    if(s->internal_buffer){
        buf = NULL; /* avoids warning */
        for(i=0; i<s->internal_buffer_count; i++){ //just 3-5 checks so it is not worth optimizing
            buf= &((InternalBuffer*)s->internal_buffer)[i];
            if(buf->data[0] == pic->data[0])
                break;
        }
        assert(i < s->internal_buffer_count);
        s->internal_buffer_count--;
        last = &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count];

        FFSWAP(InternalBuffer, *buf, *last);
    }

    for(i=0; i<4; i++){
        pic->data[i]=NULL;
//        pic->base[i]=NULL;
    }
//printf("R%X\n", pic->opaque);

    if(s->debug&FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_release_buffer called on pic %p, %d buffers used\n", pic, s->internal_buffer_count);
}

int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic){
    AVFrame temp_pic;
    int i;

    /* If no picture return a new buffer */
    if(pic->data[0] == NULL) {
        /* We will copy from buffer, so must be readable */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return s->get_buffer(s, pic);
    }

    /* If internal buffer type return the same buffer */
    if(pic->type == FF_BUFFER_TYPE_INTERNAL) {
        if(s->pkt) pic->pkt_pts= s->pkt->pts;
        else       pic->pkt_pts= AV_NOPTS_VALUE;
        pic->reordered_opaque= s->reordered_opaque;
        return 0;
    }

    /*
     * Not internal type and reget_buffer not overridden, emulate cr buffer
     */
    temp_pic = *pic;
    for(i = 0; i < 4; i++)
        pic->data[i] = pic->base[i] = NULL;
    pic->opaque = NULL;
    /* Allocate new frame */
    if (s->get_buffer(s, pic))
        return -1;
    /* Copy image data from old buffer to new buffer */
    av_picture_copy((AVPicture*)pic, (AVPicture*)&temp_pic, s->pix_fmt, s->width,
                    s->height);
    s->release_buffer(s, &temp_pic); // Release old frame
    return 0;
}
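
/*
 * Illustrative sketch (not part of the original file): an application that
 * overrides the buffer callbacks usually wraps the defaults above. The names
 * my_get_buffer/my_release_buffer are hypothetical.
 *
 *     static int my_get_buffer(AVCodecContext *ctx, AVFrame *frame)
 *     {
 *         int ret = avcodec_default_get_buffer(ctx, frame);
 *         // attach application data via frame->opaque here if needed
 *         return ret;
 *     }
 *
 *     static void my_release_buffer(AVCodecContext *ctx, AVFrame *frame)
 *     {
 *         // release application data referenced by frame->opaque here
 *         avcodec_default_release_buffer(ctx, frame);
 *     }
 *
 *     ctx->get_buffer     = my_get_buffer;
 *     ctx->release_buffer = my_release_buffer;
 */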

int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size){
    int i;

    for(i=0; i<count; i++){
        int r= func(c, (char*)arg + i*size);
        if(ret) ret[i]= r;
    }
    return 0;
}

int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr),void *arg, int *ret, int count){
    int i;

    for(i=0; i<count; i++){
        int r= func(c, arg, i, 0);
        if(ret) ret[i]= r;
    }
    return 0;
}

enum PixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum PixelFormat *fmt){
    while (*fmt != PIX_FMT_NONE && ff_is_hwaccel_pix_fmt(*fmt))
        ++fmt;
    return fmt[0];
}

void avcodec_get_frame_defaults(AVFrame *pic){
    memset(pic, 0, sizeof(AVFrame));

    pic->pts = pic->best_effort_timestamp = AV_NOPTS_VALUE;
    pic->key_frame= 1;
}

AVFrame *avcodec_alloc_frame(void){
    AVFrame *pic= av_malloc(sizeof(AVFrame));

    if(pic==NULL) return NULL;

    avcodec_get_frame_defaults(pic);

    return pic;
}

int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{
    int ret= -1;

    /* If there is a user-supplied mutex locking routine, call it. */
    if (ff_lockmgr_cb) {
        if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
            return -1;
    }

    entangled_thread_counter++;
    if(entangled_thread_counter != 1){
        av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
        goto end;
    }

    if(avctx->codec || !codec)
        goto end;

    if (codec->priv_data_size > 0) {
        if(!avctx->priv_data){
            avctx->priv_data = av_mallocz(codec->priv_data_size);
            if (!avctx->priv_data) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            if(codec->priv_class){ //this can be dropped once all user apps use avcodec_get_context_defaults3()
                *(AVClass**)avctx->priv_data= codec->priv_class;
                av_opt_set_defaults(avctx->priv_data);
            }
        }
    } else {
        avctx->priv_data = NULL;
    }

    if(avctx->coded_width && avctx->coded_height)
        avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
    else if(avctx->width && avctx->height)
        avcodec_set_dimensions(avctx, avctx->width, avctx->height);

    if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
        && (  av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
           || av_image_check_size(avctx->width, avctx->height, 0, avctx) < 0)) {
        av_log(avctx, AV_LOG_WARNING, "ignoring invalid width/height values\n");
        avcodec_set_dimensions(avctx, 0, 0);
    }

    /* if the decoder init function was already called previously,
       free the already allocated subtitle_header before overwriting it */
    if (codec->decode)
        av_freep(&avctx->subtitle_header);

#define SANE_NB_CHANNELS 128U
    if (avctx->channels > SANE_NB_CHANNELS) {
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }

    avctx->codec = codec;
    if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
        avctx->codec_id == CODEC_ID_NONE) {
        avctx->codec_type = codec->type;
        avctx->codec_id   = codec->id;
    }
    if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
                           && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
        av_log(avctx, AV_LOG_ERROR, "codec type or id mismatches\n");
        goto free_and_end;
    }
    avctx->frame_number = 0;

    if (HAVE_THREADS && !avctx->thread_opaque) {
        ret = ff_thread_init(avctx, avctx->thread_count);
        if (ret < 0) {
            goto free_and_end;
        }
    }

    if (avctx->codec->max_lowres < avctx->lowres) {
        av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
               avctx->codec->max_lowres);
        goto free_and_end;
    }
    if (avctx->codec->sample_fmts && avctx->codec->encode) {
        int i;
        for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++)
            if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
                break;
        if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
            av_log(avctx, AV_LOG_ERROR, "Specified sample_fmt is not supported.\n");
            goto free_and_end;
        }
    }

    avctx->pts_correction_num_faulty_pts =
    avctx->pts_correction_num_faulty_dts = 0;
    avctx->pts_correction_last_pts =
    avctx->pts_correction_last_dts = INT64_MIN;

    if(avctx->codec->init && !(avctx->active_thread_type&FF_THREAD_FRAME)){
        ret = avctx->codec->init(avctx);
        if (ret < 0) {
            goto free_and_end;
        }
    }

    ret=0;
end:
    entangled_thread_counter--;

    /* Release any user-supplied mutex. */
    if (ff_lockmgr_cb) {
        (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
    }
    return ret;
free_and_end:
    av_freep(&avctx->priv_data);
    avctx->codec= NULL;
    goto end;
}
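
/*
 * Illustrative sketch (not part of the original file): the usual open/close
 * sequence for a decoder with this version of the API. The names codec and
 * ctx are hypothetical; error handling is abbreviated.
 *
 *     AVCodec *codec = avcodec_find_decoder(CODEC_ID_H264);
 *     AVCodecContext *ctx = avcodec_alloc_context();
 *     if (!codec || !ctx || avcodec_open(ctx, codec) < 0)
 *         return -1;
 *     // ... decode with avcodec_decode_video2() ...
 *     avcodec_close(ctx);
 *     av_free(ctx);
 */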

int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                         const short *samples)
{
    if(buf_size < FF_MIN_BUFFER_SIZE && 0){
        av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
        return -1;
    }
    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || samples){
        int ret = avctx->codec->encode(avctx, buf, buf_size, samples);
        avctx->frame_number++;
        return ret;
    }else
        return 0;
}

int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                         const AVFrame *pict)
{
    if(buf_size < FF_MIN_BUFFER_SIZE){
        av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n");
        return -1;
    }
    if(av_image_check_size(avctx->width, avctx->height, 0, avctx))
        return -1;
    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){
        int ret = avctx->codec->encode(avctx, buf, buf_size, pict);
        avctx->frame_number++;
        emms_c(); //needed to avoid an emms_c() call before every return;

        return ret;
    }else
        return 0;
}
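
/*
 * Illustrative sketch (not part of the original file): encoding one video
 * frame with this API. outbuf, outbuf_size and outfile are hypothetical; the
 * return value is the number of bytes written into outbuf, and a return of 0
 * typically means the encoder buffered the frame (CODEC_CAP_DELAY).
 *
 *     int out_size = avcodec_encode_video(ctx, outbuf, outbuf_size, frame);
 *     if (out_size < 0)
 *         return -1;
 *     if (out_size > 0)
 *         fwrite(outbuf, 1, out_size, outfile);
 */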

int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
                            const AVSubtitle *sub)
{
    int ret;
    if(sub->start_display_time) {
        av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
        return -1;
    }
    if(sub->num_rects == 0 || !sub->rects)
        return -1;
    ret = avctx->codec->encode(avctx, buf, buf_size, sub);
    avctx->frame_number++;
    return ret;
}

/**
 * Attempt to guess proper monotonic timestamps for decoded video frames
 * which might have incorrect times. Input timestamps may wrap around, in
 * which case the output will as well.
 *
 * @param pts the pts field of the decoded AVPacket, as passed through
 * AVFrame.pkt_pts
 * @param dts the dts field of the decoded AVPacket
 * @return one of the input values, may be AV_NOPTS_VALUE
 */
static int64_t guess_correct_pts(AVCodecContext *ctx,
                                 int64_t reordered_pts, int64_t dts)
{
    int64_t pts = AV_NOPTS_VALUE;

    if (dts != AV_NOPTS_VALUE) {
        ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
        ctx->pts_correction_last_dts = dts;
    }
    if (reordered_pts != AV_NOPTS_VALUE) {
        ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
        ctx->pts_correction_last_pts = reordered_pts;
    }
    if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
       && reordered_pts != AV_NOPTS_VALUE)
        pts = reordered_pts;
    else
        pts = dts;

    return pts;
}
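
/*
 * Added worked example (not in the original file): the heuristic above counts
 * non-monotonic pts and dts values and prefers whichever side has misbehaved
 * less often. With packets carrying (pts, dts) = (3, 1), (1, 2), (2, 3): the
 * first call returns pts (3) since both fault counters are zero; the second
 * pts (1) is not greater than the previous pts (3), so the pts fault counter
 * becomes 1 and the function returns dts from then on (2, then 3).
 */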


#if FF_API_VIDEO_OLD
int attribute_align_arg avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
                         int *got_picture_ptr,
                         const uint8_t *buf, int buf_size)
{
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = buf;
    avpkt.size = buf_size;
    // HACK for CorePNG to decode as normal PNG by default
    avpkt.flags = AV_PKT_FLAG_KEY;

    return avcodec_decode_video2(avctx, picture, got_picture_ptr, &avpkt);
}
#endif

int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
                         int *got_picture_ptr,
                         AVPacket *avpkt)
{
    int ret;

    *got_picture_ptr= 0;
    if((avctx->coded_width||avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx))
        return -1;

    avctx->pkt = avpkt;

    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type&FF_THREAD_FRAME)){
        if (HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
             ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr,
                                          avpkt);
        else {
            ret = avctx->codec->decode(avctx, picture, got_picture_ptr,
                                       avpkt);
            picture->pkt_dts= avpkt->dts;
        }

        emms_c(); //needed to avoid an emms_c() call before every return;

        if (*got_picture_ptr){
            avctx->frame_number++;
            picture->best_effort_timestamp = guess_correct_pts(avctx,
                                                               picture->pkt_pts,
                                                               picture->pkt_dts);
        }
    }else
        ret= 0;

    return ret;
}
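
/*
 * Illustrative sketch (not part of the original file): a minimal decode call
 * for this API version. pkt would come from av_read_frame() and frame from
 * avcodec_alloc_frame(); names are hypothetical, error handling abbreviated.
 *
 *     int got_picture = 0;
 *     if (avcodec_decode_video2(ctx, frame, &got_picture, &pkt) < 0)
 *         return -1;
 *     if (got_picture) {
 *         // frame->data[]/linesize[] are valid until the next decode call;
 *         // frame->best_effort_timestamp was filled by guess_correct_pts()
 *     }
 */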

#if FF_API_AUDIO_OLD
int attribute_align_arg avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
                         int *frame_size_ptr,
                         const uint8_t *buf, int buf_size)
{
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = buf;
    avpkt.size = buf_size;

    return avcodec_decode_audio3(avctx, samples, frame_size_ptr, &avpkt);
}
#endif

int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
                         int *frame_size_ptr,
                         AVPacket *avpkt)
{
    int ret;

    avctx->pkt = avpkt;

    if((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size){
        //FIXME remove the check below _after_ ensuring that all audio decoders check that the available space is enough
        if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
            av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
            return -1;
        }
        if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
           *frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t)){
            av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
            return -1;
        }

        ret = avctx->codec->decode(avctx, samples, frame_size_ptr, avpkt);
        avctx->frame_number++;
    }else{
        ret= 0;
        *frame_size_ptr=0;
    }
    return ret;
}
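
/*
 * Illustrative sketch (not part of the original file): callers of
 * avcodec_decode_audio3() must pass a buffer of at least
 * AVCODEC_MAX_AUDIO_FRAME_SIZE bytes and initialise *frame_size_ptr to its
 * size; on return it holds the number of bytes of decoded audio. Names are
 * hypothetical.
 *
 *     static DECLARE_ALIGNED(16, uint8_t, audio_buf)[AVCODEC_MAX_AUDIO_FRAME_SIZE];
 *     int out_size = sizeof(audio_buf);
 *     if (avcodec_decode_audio3(ctx, (int16_t *)audio_buf, &out_size, &pkt) < 0)
 *         return -1;
 *     // out_size bytes of PCM are now in audio_buf (0 if nothing was decoded)
 */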

#if FF_API_SUBTITLE_OLD
int avcodec_decode_subtitle(AVCodecContext *avctx, AVSubtitle *sub,
                            int *got_sub_ptr,
                            const uint8_t *buf, int buf_size)
{
    AVPacket avpkt;
    av_init_packet(&avpkt);
    avpkt.data = buf;
    avpkt.size = buf_size;

    return avcodec_decode_subtitle2(avctx, sub, got_sub_ptr, &avpkt);
}
#endif

int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
                            int *got_sub_ptr,
                            AVPacket *avpkt)
{
    int ret;

    avctx->pkt = avpkt;
    *got_sub_ptr = 0;
    ret = avctx->codec->decode(avctx, sub, got_sub_ptr, avpkt);
    if (*got_sub_ptr)
        avctx->frame_number++;
    return ret;
}

void avsubtitle_free(AVSubtitle *sub)
{
    int i;

    for (i = 0; i < sub->num_rects; i++)
    {
        av_freep(&sub->rects[i]->pict.data[0]);
        av_freep(&sub->rects[i]->pict.data[1]);
        av_freep(&sub->rects[i]->pict.data[2]);
        av_freep(&sub->rects[i]->pict.data[3]);
        av_freep(&sub->rects[i]->text);
        av_freep(&sub->rects[i]->ass);
        av_freep(&sub->rects[i]);
    }

    av_freep(&sub->rects);

    memset(sub, 0, sizeof(AVSubtitle));
}

av_cold int avcodec_close(AVCodecContext *avctx)
{
    /* If there is a user-supplied mutex locking routine, call it. */
    if (ff_lockmgr_cb) {
        if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN))
            return -1;
    }

    entangled_thread_counter++;
    if(entangled_thread_counter != 1){
        av_log(avctx, AV_LOG_ERROR, "insufficient thread locking around avcodec_open/close()\n");
        entangled_thread_counter--;
        return -1;
    }

    if (HAVE_THREADS && avctx->thread_opaque)
        ff_thread_free(avctx);
    if (avctx->codec && avctx->codec->close)
        avctx->codec->close(avctx);
    avcodec_default_free_buffers(avctx);
    avctx->coded_frame = NULL;
    av_freep(&avctx->priv_data);
    if(avctx->codec && avctx->codec->encode)
        av_freep(&avctx->extradata);
    avctx->codec = NULL;
    avctx->active_thread_type = 0;
    entangled_thread_counter--;

    /* Release any user-supplied mutex. */
    if (ff_lockmgr_cb) {
        (*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE);
    }
    return 0;
}

AVCodec *avcodec_find_encoder(enum CodecID id)
{
    AVCodec *p, *experimental=NULL;
    p = first_avcodec;
    while (p) {
        if (p->encode != NULL && p->id == id) {
            if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) {
                experimental = p;
            } else
                return p;
        }
        p = p->next;
    }
    return experimental;
}

AVCodec *avcodec_find_encoder_by_name(const char *name)
{
    AVCodec *p;
    if (!name)
        return NULL;
    p = first_avcodec;
    while (p) {
        if (p->encode != NULL && strcmp(name,p->name) == 0)
            return p;
        p = p->next;
    }
    return NULL;
}

AVCodec *avcodec_find_decoder(enum CodecID id)
{
    AVCodec *p;
    p = first_avcodec;
    while (p) {
        if (p->decode != NULL && p->id == id)
            return p;
        p = p->next;
    }
    return NULL;
}

AVCodec *avcodec_find_decoder_by_name(const char *name)
{
    AVCodec *p;
    if (!name)
        return NULL;
    p = first_avcodec;
    while (p) {
        if (p->decode != NULL && strcmp(name,p->name) == 0)
            return p;
        p = p->next;
    }
    return NULL;
}

static int get_bit_rate(AVCodecContext *ctx)
{
    int bit_rate;
    int bits_per_sample;

    switch(ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
    case AVMEDIA_TYPE_DATA:
    case AVMEDIA_TYPE_SUBTITLE:
    case AVMEDIA_TYPE_ATTACHMENT:
        bit_rate = ctx->bit_rate;
        break;
    case AVMEDIA_TYPE_AUDIO:
        bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
        bit_rate = bits_per_sample ? ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate;
        break;
    default:
        bit_rate = 0;
        break;
    }
    return bit_rate;
}

size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag)
{
    int i, len, ret = 0;

    for (i = 0; i < 4; i++) {
        len = snprintf(buf, buf_size,
                       isprint(codec_tag&0xFF) ? "%c" : "[%d]", codec_tag&0xFF);
        buf      += len;
        buf_size  = buf_size > len ? buf_size - len : 0;
        ret      += len;
        codec_tag>>=8;
    }
    return ret;
}
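
/*
 * Added example (not in the original file): for codec_tag ==
 * MKTAG('a','v','c','1') all four bytes are printable, so "avc1" is written
 * into buf; a byte that is not printable is emitted as its decimal value in
 * brackets, e.g. a tag byte of 0x01 becomes "[1]".
 */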

void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode)
{
    const char *codec_name;
    const char *profile = NULL;
    AVCodec *p;
    char buf1[32];
    int bitrate;
    AVRational display_aspect_ratio;

    if (encode)
        p = avcodec_find_encoder(enc->codec_id);
    else
        p = avcodec_find_decoder(enc->codec_id);

    if (p) {
        codec_name = p->name;
        profile = av_get_profile_name(p, enc->profile);
    } else if (enc->codec_id == CODEC_ID_MPEG2TS) {
        /* fake mpeg2 transport stream codec (currently not
           registered) */
        codec_name = "mpeg2ts";
    } else if (enc->codec_name[0] != '\0') {
        codec_name = enc->codec_name;
    } else {
        /* output avi tags */
        char tag_buf[32];
        av_get_codec_tag_string(tag_buf, sizeof(tag_buf), enc->codec_tag);
        snprintf(buf1, sizeof(buf1), "%s / 0x%04X", tag_buf, enc->codec_tag);
        codec_name = buf1;
    }

    switch(enc->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        snprintf(buf, buf_size,
                 "Video: %s%s",
                 codec_name, enc->mb_decision ? " (hq)" : "");
        if (profile)
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     " (%s)", profile);
        if (enc->pix_fmt != PIX_FMT_NONE) {
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", %s",
                     avcodec_get_pix_fmt_name(enc->pix_fmt));
        }
        if (enc->width) {
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", %dx%d",
                     enc->width, enc->height);
            if (enc->sample_aspect_ratio.num) {
                av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
                          enc->width*enc->sample_aspect_ratio.num,
                          enc->height*enc->sample_aspect_ratio.den,
                          1024*1024);
                snprintf(buf + strlen(buf), buf_size - strlen(buf),
                         " [PAR %d:%d DAR %d:%d]",
                         enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den,
                         display_aspect_ratio.num, display_aspect_ratio.den);
            }
            if(av_log_get_level() >= AV_LOG_DEBUG){
                int g= av_gcd(enc->time_base.num, enc->time_base.den);
                snprintf(buf + strlen(buf), buf_size - strlen(buf),
                         ", %d/%d",
                         enc->time_base.num/g, enc->time_base.den/g);
            }
        }
        if (encode) {
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", q=%d-%d", enc->qmin, enc->qmax);
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        snprintf(buf, buf_size,
                 "Audio: %s",
                 codec_name);
        if (profile)
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     " (%s)", profile);
        if (enc->sample_rate) {
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", %d Hz", enc->sample_rate);
        }
        av_strlcat(buf, ", ", buf_size);
        av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout);
        if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) {
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", %s", av_get_sample_fmt_name(enc->sample_fmt));
        }
        break;
    case AVMEDIA_TYPE_DATA:
        snprintf(buf, buf_size, "Data: %s", codec_name);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        snprintf(buf, buf_size, "Subtitle: %s", codec_name);
        break;
    case AVMEDIA_TYPE_ATTACHMENT:
        snprintf(buf, buf_size, "Attachment: %s", codec_name);
        break;
    default:
        snprintf(buf, buf_size, "Invalid Codec type %d", enc->codec_type);
        return;
    }
    if (encode) {
        if (enc->flags & CODEC_FLAG_PASS1)
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", pass 1");
        if (enc->flags & CODEC_FLAG_PASS2)
            snprintf(buf + strlen(buf), buf_size - strlen(buf),
                     ", pass 2");
    }
    bitrate = get_bit_rate(enc);
    if (bitrate != 0) {
        snprintf(buf + strlen(buf), buf_size - strlen(buf),
                 ", %d kb/s", bitrate / 1000);
    }
}

const char *av_get_profile_name(const AVCodec *codec, int profile)
{
    const AVProfile *p;
    if (profile == FF_PROFILE_UNKNOWN || !codec->profiles)
        return NULL;

    for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++)
        if (p->profile == profile)
            return p->name;

    return NULL;
}

unsigned avcodec_version( void )
{
    return LIBAVCODEC_VERSION_INT;
}

const char *avcodec_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}

const char *avcodec_license(void)
{
#define LICENSE_PREFIX "libavcodec license: "
    return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}

void avcodec_init(void)
{
    static int initialized = 0;

    if (initialized != 0)
        return;
    initialized = 1;

    dsputil_static_init();
}

void avcodec_flush_buffers(AVCodecContext *avctx)
{
    if(HAVE_PTHREADS && avctx->active_thread_type&FF_THREAD_FRAME)
        ff_thread_flush(avctx);
    if(avctx->codec->flush)
        avctx->codec->flush(avctx);
}

void avcodec_default_free_buffers(AVCodecContext *s){
    int i, j;

    if(s->internal_buffer==NULL) return;

    if (s->internal_buffer_count)
        av_log(s, AV_LOG_WARNING, "Found %i unreleased buffers!\n", s->internal_buffer_count);
    for(i=0; i<INTERNAL_BUFFER_SIZE; i++){
        InternalBuffer *buf= &((InternalBuffer*)s->internal_buffer)[i];
        for(j=0; j<4; j++){
            av_freep(&buf->base[j]);
            buf->data[j]= NULL;
        }
    }
    av_freep(&s->internal_buffer);

    s->internal_buffer_count=0;
}

char av_get_pict_type_char(int pict_type){
    switch(pict_type){
    case FF_I_TYPE: return 'I';
    case FF_P_TYPE: return 'P';
    case FF_B_TYPE: return 'B';
    case FF_S_TYPE: return 'S';
    case FF_SI_TYPE:return 'i';
    case FF_SP_TYPE:return 'p';
    case FF_BI_TYPE:return 'b';
    default:        return '?';
    }
}

int av_get_bits_per_sample(enum CodecID codec_id){
    switch(codec_id){
    case CODEC_ID_ADPCM_SBPRO_2:
        return 2;
    case CODEC_ID_ADPCM_SBPRO_3:
        return 3;
    case CODEC_ID_ADPCM_SBPRO_4:
    case CODEC_ID_ADPCM_CT:
    case CODEC_ID_ADPCM_IMA_WAV:
    case CODEC_ID_ADPCM_MS:
    case CODEC_ID_ADPCM_YAMAHA:
        return 4;
    case CODEC_ID_PCM_ALAW:
    case CODEC_ID_PCM_MULAW:
    case CODEC_ID_PCM_S8:
    case CODEC_ID_PCM_U8:
    case CODEC_ID_PCM_ZORK:
        return 8;
    case CODEC_ID_PCM_S16BE:
    case CODEC_ID_PCM_S16LE:
    case CODEC_ID_PCM_S16LE_PLANAR:
    case CODEC_ID_PCM_U16BE:
    case CODEC_ID_PCM_U16LE:
        return 16;
    case CODEC_ID_PCM_S24DAUD:
    case CODEC_ID_PCM_S24BE:
    case CODEC_ID_PCM_S24LE:
    case CODEC_ID_PCM_U24BE:
    case CODEC_ID_PCM_U24LE:
        return 24;
    case CODEC_ID_PCM_S32BE:
    case CODEC_ID_PCM_S32LE:
    case CODEC_ID_PCM_U32BE:
    case CODEC_ID_PCM_U32LE:
    case CODEC_ID_PCM_F32BE:
    case CODEC_ID_PCM_F32LE:
        return 32;
    case CODEC_ID_PCM_F64BE:
    case CODEC_ID_PCM_F64LE:
        return 64;
    default:
        return 0;
    }
}

#if FF_API_OLD_SAMPLE_FMT
int av_get_bits_per_sample_format(enum AVSampleFormat sample_fmt) {
    return av_get_bits_per_sample_fmt(sample_fmt);
}
#endif

#if !HAVE_THREADS
int ff_thread_init(AVCodecContext *s, int thread_count){
    s->thread_count = thread_count;
    return -1;
}
#endif

unsigned int av_xiphlacing(unsigned char *s, unsigned int v)
{
    unsigned int n = 0;

    while(v >= 0xff) {
        *s++ = 0xff;
        v -= 0xff;
        n++;
    }
    *s = v;
    n++;
    return n;
}
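
/*
 * Added example (not in the original file): Xiph-style lacing stores a length
 * as a run of 0xff bytes plus a final byte holding the remainder. For v == 600
 * the function writes 0xff, 0xff, 0x5a (255 + 255 + 90 = 600) and returns 3;
 * a value below 255 is written as a single byte.
 */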

#if LIBAVCODEC_VERSION_MAJOR < 53
#include "libavutil/parseutils.h"

int av_parse_video_frame_size(int *width_ptr, int *height_ptr, const char *str)
{
    return av_parse_video_size(width_ptr, height_ptr, str);
}

int av_parse_video_frame_rate(AVRational *frame_rate, const char *arg)
{
    return av_parse_video_rate(frame_rate, arg);
}
#endif

int ff_match_2uint16(const uint16_t (*tab)[2], int size, int a, int b){
    int i;
    for(i=0; i<size && !(tab[i][0]==a && tab[i][1]==b); i++);
    return i;
}

void av_log_missing_feature(void *avc, const char *feature, int want_sample)
{
    av_log(avc, AV_LOG_WARNING, "%s not implemented. Update your FFmpeg "
            "version to the newest one from Git. If the problem still "
            "occurs, it means that your file has a feature which has not "
            "been implemented.", feature);
    if(want_sample)
        av_log_ask_for_sample(avc, NULL);
    else
        av_log(avc, AV_LOG_WARNING, "\n");
}

void av_log_ask_for_sample(void *avc, const char *msg)
{
    if (msg)
        av_log(avc, AV_LOG_WARNING, "%s ", msg);
    av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample "
            "of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ "
            "and contact the ffmpeg-devel mailing list.\n");
}

static AVHWAccel *first_hwaccel = NULL;

void av_register_hwaccel(AVHWAccel *hwaccel)
{
    AVHWAccel **p = &first_hwaccel;
    while (*p)
        p = &(*p)->next;
    *p = hwaccel;
    hwaccel->next = NULL;
}

AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel)
{
    return hwaccel ? hwaccel->next : first_hwaccel;
}

AVHWAccel *ff_find_hwaccel(enum CodecID codec_id, enum PixelFormat pix_fmt)
{
    AVHWAccel *hwaccel=NULL;

    while((hwaccel= av_hwaccel_next(hwaccel))){
        if (   hwaccel->id      == codec_id
            && hwaccel->pix_fmt == pix_fmt)
            return hwaccel;
    }
    return NULL;
}

int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op))
{
    if (ff_lockmgr_cb) {
        if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY))
            return -1;
    }

    ff_lockmgr_cb = cb;

    if (ff_lockmgr_cb) {
        if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE))
            return -1;
    }
    return 0;
}
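
/*
 * Illustrative sketch (not part of the original file): a pthread-based lock
 * manager an application could register so that concurrent
 * avcodec_open()/avcodec_close() calls are serialised. The name my_lockmgr
 * is hypothetical; the callback must return 0 on success.
 *
 *     static int my_lockmgr(void **mutex, enum AVLockOp op)
 *     {
 *         switch (op) {
 *         case AV_LOCK_CREATE:
 *             *mutex = malloc(sizeof(pthread_mutex_t));
 *             return *mutex ? pthread_mutex_init(*mutex, NULL) : -1;
 *         case AV_LOCK_OBTAIN:  return pthread_mutex_lock(*mutex);
 *         case AV_LOCK_RELEASE: return pthread_mutex_unlock(*mutex);
 *         case AV_LOCK_DESTROY:
 *             pthread_mutex_destroy(*mutex);
 *             free(*mutex);
 *             return 0;
 *         }
 *         return 1;
 *     }
 *
 *     av_lockmgr_register(my_lockmgr);
 */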

unsigned int ff_toupper4(unsigned int x)
{
    return     toupper( x     &0xFF)
            + (toupper((x>>8 )&0xFF)<<8 )
            + (toupper((x>>16)&0xFF)<<16)
            + (toupper((x>>24)&0xFF)<<24);
}

#if !HAVE_PTHREADS

int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
{
    f->owner = avctx;
    return avctx->get_buffer(avctx, f);
}

void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
    f->owner->release_buffer(f->owner, f);
}

void ff_thread_finish_setup(AVCodecContext *avctx)
{
}

void ff_thread_report_progress(AVFrame *f, int progress, int field)
{
}

void ff_thread_await_progress(AVFrame *f, int progress, int field)
{
}

#endif

#if LIBAVCODEC_VERSION_MAJOR < 53

int avcodec_thread_init(AVCodecContext *s, int thread_count)
{
    return ff_thread_init(s, thread_count);
}

void avcodec_thread_free(AVCodecContext *s)
{
#if HAVE_THREADS
    ff_thread_free(s);
#endif
}

#endif