Statistics
| Branch: | Revision:

ffmpeg / libavformat / output-example.c @ ebb92e07

History | View | Annotate | Download (16.4 KB)

1
/*
2
 * Libavformat API example: Output a media file in any supported
3
 * libavformat format. The default codecs are used.
4
 *
5
 * Copyright (c) 2003 Fabrice Bellard
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a copy
8
 * of this software and associated documentation files (the "Software"), to deal
9
 * in the Software without restriction, including without limitation the rights
10
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
 * copies of the Software, and to permit persons to whom the Software is
12
 * furnished to do so, subject to the following conditions:
13
 *
14
 * The above copyright notice and this permission notice shall be included in
15
 * all copies or substantial portions of the Software.
16
 *
17
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23
 * THE SOFTWARE.
24
 */
25
#include <stdlib.h>
26
#include <stdio.h>
27
#include <string.h>
28
#include <math.h>
29

    
30
#include "libavformat/avformat.h"
31
#include "libswscale/swscale.h"
32

    
33
#undef exit
34

    
35
/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

/* scaler quality flag used when converting to a non-YUV420P codec format */
static int sws_flags = SWS_BICUBIC;
42

    
43
/**************************************************************/
44
/* audio output */
45

    
46
/* Signal-generator state and audio buffers shared by the audio path. */
float t, tincr, tincr2;     /* sine phase, phase increment, and sweep (increment of the increment) */
int16_t *samples;           /* raw PCM input frame handed to the encoder */
uint8_t *audio_outbuf;      /* encoded audio output buffer */
int audio_outbuf_size;      /* size in bytes of audio_outbuf */
int audio_input_frame_size; /* number of samples per input frame */
51

    
52
/*
53
 * add an audio output stream
54
 */
55
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
56
{
57
    AVCodecContext *c;
58
    AVStream *st;
59

    
60
    st = av_new_stream(oc, 1);
61
    if (!st) {
62
        fprintf(stderr, "Could not alloc stream\n");
63
        exit(1);
64
    }
65

    
66
    c = st->codec;
67
    c->codec_id = codec_id;
68
    c->codec_type = AVMEDIA_TYPE_AUDIO;
69

    
70
    /* put sample parameters */
71
    c->sample_fmt = AV_SAMPLE_FMT_S16;
72
    c->bit_rate = 64000;
73
    c->sample_rate = 44100;
74
    c->channels = 2;
75

    
76
    // some formats want stream headers to be separate
77
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
78
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
79

    
80
    return st;
81
}
82

    
83
/*
 * Open the audio encoder for 'st', initialize the sine-sweep generator,
 * and allocate the encoded-output and PCM-input buffers.
 * Fix: the av_malloc() results were previously used without a NULL check.
 * Exits the program on any failure.
 */
static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator: 110 Hz base tone */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);
    if (!audio_outbuf) {
        fprintf(stderr, "Could not allocate audio output buffer\n");
        exit(1);
    }

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            /* 16-bit PCM uses two bytes per sample, so halve the count */
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    /* 2 bytes per S16 sample, interleaved channels */
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
    if (!samples) {
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}
131

    
132
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
133
   'nb_channels' channels */
134
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
135
{
136
    int j, i, v;
137
    int16_t *q;
138

    
139
    q = samples;
140
    for(j=0;j<frame_size;j++) {
141
        v = (int)(sin(t) * 10000);
142
        for(i = 0; i < nb_channels; i++)
143
            *q++ = v;
144
        t += tincr;
145
        tincr += tincr2;
146
    }
147
}
148

    
149
/*
 * Encode one generated audio frame and write it to the muxer.
 * Fix: avcodec_encode_audio() returns a negative value on error, which was
 * previously stored into pkt.size unchecked and handed to the muxer.
 * Exits the program on encode or write failure.
 */
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    int out_size;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    out_size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
    if (out_size < 0) {
        fprintf(stderr, "Error while encoding audio frame\n");
        exit(1);
    }
    pkt.size= out_size;

    /* rescale the codec timestamp into the stream time base */
    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= AV_PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
173

    
174
/* Close the audio encoder and release the audio buffers. */
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(audio_outbuf);
    av_free(samples);
}
181

    
182
/**************************************************************/
183
/* video output */
184

    
185
/* Video state shared by the video path. */
AVFrame *picture, *tmp_picture;     /* encoder input frame; YUV420P staging frame when converting */
uint8_t *video_outbuf;              /* encoded video output buffer */
int frame_count, video_outbuf_size; /* frames emitted so far; size in bytes of video_outbuf */
188

    
189
/* add a video output stream */
190
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
191
{
192
    AVCodecContext *c;
193
    AVStream *st;
194

    
195
    st = av_new_stream(oc, 0);
196
    if (!st) {
197
        fprintf(stderr, "Could not alloc stream\n");
198
        exit(1);
199
    }
200

    
201
    c = st->codec;
202
    c->codec_id = codec_id;
203
    c->codec_type = AVMEDIA_TYPE_VIDEO;
204

    
205
    /* put sample parameters */
206
    c->bit_rate = 400000;
207
    /* resolution must be a multiple of two */
208
    c->width = 352;
209
    c->height = 288;
210
    /* time base: this is the fundamental unit of time (in seconds) in terms
211
       of which frame timestamps are represented. for fixed-fps content,
212
       timebase should be 1/framerate and timestamp increments should be
213
       identically 1. */
214
    c->time_base.den = STREAM_FRAME_RATE;
215
    c->time_base.num = 1;
216
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
217
    c->pix_fmt = STREAM_PIX_FMT;
218
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
219
        /* just for testing, we also add B frames */
220
        c->max_b_frames = 2;
221
    }
222
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
223
        /* Needed to avoid using macroblocks in which some coeffs overflow.
224
           This does not happen with normal video, it just happens here as
225
           the motion of the chroma plane does not match the luma plane. */
226
        c->mb_decision=2;
227
    }
228
    // some formats want stream headers to be separate
229
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
230
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;
231

    
232
    return st;
233
}
234

    
235
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
236
{
237
    AVFrame *picture;
238
    uint8_t *picture_buf;
239
    int size;
240

    
241
    picture = avcodec_alloc_frame();
242
    if (!picture)
243
        return NULL;
244
    size = avpicture_get_size(pix_fmt, width, height);
245
    picture_buf = av_malloc(size);
246
    if (!picture_buf) {
247
        av_free(picture);
248
        return NULL;
249
    }
250
    avpicture_fill((AVPicture *)picture, picture_buf,
251
                   pix_fmt, width, height);
252
    return picture;
253
}
254

    
255
/*
 * Open the video encoder for 'st' and allocate the encode buffer and
 * frame(s). A temporary YUV420P frame is allocated only when the codec
 * pixel format differs from YUV420P.
 * Fix: the av_malloc() of video_outbuf was previously unchecked.
 * Exits the program on any failure.
 */
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
        if (!video_outbuf) {
            fprintf(stderr, "Could not allocate video output buffer\n");
            exit(1);
        }
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
306

    
307
/* prepare a dummy image */
308
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
309
{
310
    int x, y, i;
311

    
312
    i = frame_index;
313

    
314
    /* Y */
315
    for(y=0;y<height;y++) {
316
        for(x=0;x<width;x++) {
317
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
318
        }
319
    }
320

    
321
    /* Cb and Cr */
322
    for(y=0;y<height/2;y++) {
323
        for(x=0;x<width/2;x++) {
324
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
325
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
326
        }
327
    }
328
}
329

    
330
/*
 * Generate (or re-use) one video frame and write it to the muxer.
 * Past STREAM_NB_FRAMES the previous picture is re-submitted so the encoder
 * can flush its B-frame latency. For AVFMT_RAWPICTURE formats the AVPicture
 * pointer itself is written instead of encoded data. Exits on write failure.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    /* lazily-created scaler context, cached across calls; the stream's
       dimensions and pixel format never change, so one context suffices */
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            /* render into the YUV420P staging frame, then convert */
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           futur for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        /* the packet carries the AVPicture struct itself, not pixel data */
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            /* rescale the codec timestamp into the stream time base */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
406

    
407
/* Close the video encoder and free the frames and the encode buffer. */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(video_outbuf);

    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(picture->data[0]);
    av_free(picture);
}
418

    
419
/**************************************************************/
420
/* media file output */
421

    
422
/*
 * Entry point: guess the output format from the file name, create one audio
 * and/or one video stream with the format's default codecs, then interleave
 * generated frames for STREAM_DURATION seconds and finalize the file.
 */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        exit(1);
    }

    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        exit(1);
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        exit(1);
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    /* print detailed information about the output streams */
    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed (some formats, e.g. image
       sequences, manage their own files) */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            exit(1);
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    for(;;) {
        /* compute current audio and video time, in seconds */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        /* stop once every existing stream has reached the target duration */
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames: feed whichever
           stream is currently behind */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* write the trailer, if any.  the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        avio_close(oc->pb);
    }

    /* free the stream */
    av_free(oc);

    return 0;
}