ffmpeg / output_example.c @ 3a74415d

/*
 * Libavformat API example: Output a media file in any supported
 * libavformat format. The default codecs are used.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

#include "avformat.h"
#include "swscale.h"

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

static int sws_flags = SWS_BICUBIC;
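
/* Added note: with the values above, STREAM_NB_FRAMES evaluates to
   (int)(5.0 * 25) = 125 frames, i.e. five seconds of video at 25 frames
   per second. */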

/**************************************************************/
/* audio output */

float t, tincr, tincr2;
int16_t *samples;
uint8_t *audio_outbuf;
int audio_outbuf_size;
int audio_input_frame_size;

/*
 * add an audio output stream
 */
static AVStream *add_audio_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_AUDIO;

    /* put sample parameters */
    c->bit_rate = 64000;
    c->sample_rate = 44100;
    c->channels = 2;
    return st;
}

static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
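    /* Added worked numbers: at sample_rate 44100, tincr starts at
       2*pi*110/44100 rad per sample, i.e. a 110 Hz tone. tincr2 adds
       2*pi*110/44100^2 per sample, so after one second (44100 samples) the
       per-sample phase step has grown by 2*pi*110/44100, raising the tone by
       110 Hz, as the comment above says. */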

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support) to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
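    /* Added worked example: for a 16-bit stereo PCM codec the branch above
       gives audio_input_frame_size = (10000 / 2 channels) >> 1 = 2500 samples
       per channel, so the 'samples' buffer holds
       2500 * 2 bytes * 2 channels = 10000 bytes per generated frame. */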
}

/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
   'nb_channels' channels */
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
{
    int j, i, v;
    int16_t *q;

    q = samples;
    for(j=0;j<frame_size;j++) {
        v = (int)(sin(t) * 10000);
        for(i = 0; i < nb_channels; i++)
            *q++ = v;
        t += tincr;
        tincr += tincr2;
    }
}

static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);

    pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
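    /* Added note: av_rescale_q(a, bq, cq) rescales a from time base bq to
       time base cq (a * bq / cq, with rounding), so the line above converts
       the encoder's pts, counted in the codec time base, into the muxer
       stream's time base before the packet is written. */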
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}

static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(audio_outbuf);
}

/**************************************************************/
/* video output */

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int frame_count, video_outbuf_size;

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. For fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = STREAM_FRAME_RATE;
    c->time_base.num = 1;
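    /* Added note: with a 1/25 time base each encoded frame advances the video
       pts by exactly 1 tick (25 ticks per second), so the 5 second stream
       spans pts 0..124 in this time base. */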
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = STREAM_PIX_FMT;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
        /* needed to avoid using macroblocks in which some coeffs overflow;
           this doesn't happen with normal video, it just happens here as the
           motion of the chroma plane doesn't match the luma plane */
        c->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(!strcmp(oc->oformat->name, "mp4") || !strcmp(oc->oformat->name, "mov") || !strcmp(oc->oformat->name, "3gp"))
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

/* prepare a dummy image */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y, i;

    i = frame_index;
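    /* Added note: in YUV420P the Y plane is full resolution while Cb and Cr
       are subsampled 2x2, which is why the chroma loops below run over
       width/2 and height/2. Pixels are addressed through linesize[] because a
       plane's row stride may be padded beyond its visible width. */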

    /* Y */
    for(y=0;y<height;y++) {
        for(x=0;x<width;x++) {
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
        }
    }

    /* Cb and Cr */
    for(y=0;y<height/2;y++) {
        for(x=0;x<width/2;x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}

static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);
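        /* Added note (assumption about the AVFMT_RAWPICTURE path): for such
           formats no encoding is done; the packet simply carries the
           AVPicture struct itself and the muxer reads the plane pointers
           from it. */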

        ret = av_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}

static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}

/**************************************************************/
/* media file output */

int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();
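    /* Added overview of the muxing sequence used below: pick a format
       (guess_format), create the AVFormatContext and its streams, call
       av_set_parameters and av_write_header, feed interleaved packets through
       av_write_frame, then finish with av_write_trailer and free
       everything. */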

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        exit(1);
    }

    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = av_alloc_format_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    for(;;) {
        /* compute current audio and video time */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
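        /* Added note: audio_pts and video_pts were converted to seconds above
           (pts.val * time_base), so whichever stream is further behind gets
           the next packet; this keeps audio and video roughly interleaved in
           the output file. */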
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* write the trailer, if any */
    av_write_trailer(oc);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        url_fclose(&oc->pb);
    }

    /* free the output context */
    av_free(oc);

    return 0;
}