Statistics
| Branch: | Revision:

ffmpeg / libavformat / output-example.c @ 72415b2a

History | View | Annotate | Download (16.3 KB)

1
/*
2
 * Libavformat API example: Output a media file in any supported
3
 * libavformat format. The default codecs are used.
4
 *
5
 * Copyright (c) 2003 Fabrice Bellard
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a copy
8
 * of this software and associated documentation files (the "Software"), to deal
9
 * in the Software without restriction, including without limitation the rights
10
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
 * copies of the Software, and to permit persons to whom the Software is
12
 * furnished to do so, subject to the following conditions:
13
 *
14
 * The above copyright notice and this permission notice shall be included in
15
 * all copies or substantial portions of the Software.
16
 *
17
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23
 * THE SOFTWARE.
24
 */
25
#include <stdlib.h>
26
#include <stdio.h>
27
#include <string.h>
28
#include <math.h>
29

    
30
#include "libavformat/avformat.h"
31
#include "libswscale/swscale.h"
32

    
33
#undef exit
34

    
35
/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

/* scaler quality flag used when converting YUV420P to the encoder's format */
static int sws_flags = SWS_BICUBIC;

/**************************************************************/
/* audio output */

float t, tincr, tincr2;      /* sine generator: phase, increment, sweep rate */
int16_t *samples;            /* one interleaved input frame of 16-bit samples */
uint8_t *audio_outbuf;       /* buffer receiving the encoded audio data */
int audio_outbuf_size;       /* size of audio_outbuf in bytes */
int audio_input_frame_size;  /* samples per channel consumed per encode call */
51

    
52
/*
53
 * add an audio output stream
54
 */
55
/*
 * Create a new audio stream on the output context and fill in the
 * default encoding parameters for the given codec id.
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVStream *st = av_new_stream(oc, 1);

    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    AVCodecContext *enc = st->codec;
    enc->codec_type = AVMEDIA_TYPE_AUDIO;
    enc->codec_id   = codec_id;

    /* sample parameters */
    enc->bit_rate    = 64000;
    enc->sample_rate = 44100;
    enc->channels    = 2;

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
81

    
82
/*
 * Open the audio encoder for the stream, initialize the sine-sweep
 * signal generator, and allocate the sample/output buffers used by
 * write_audio_frame().  Exits on any failure.
 */
static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);
    if (!audio_outbuf) {
        /* was previously unchecked; a NULL here would crash in the encoder */
        fprintf(stderr, "Could not allocate audio output buffer\n");
        exit(1);
    }

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            /* 16-bit PCM: two bytes per sample, so halve the sample count */
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    /* 2 bytes per int16_t sample, interleaved across all channels */
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
    if (!samples) {
        /* was previously unchecked */
        fprintf(stderr, "Could not allocate audio samples buffer\n");
        exit(1);
    }
}
130

    
131
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
132
   'nb_channels' channels */
133
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
134
{
135
    int j, i, v;
136
    int16_t *q;
137

    
138
    q = samples;
139
    for(j=0;j<frame_size;j++) {
140
        v = (int)(sin(t) * 10000);
141
        for(i = 0; i < nb_channels; i++)
142
            *q++ = v;
143
        t += tincr;
144
        tincr += tincr2;
145
    }
146
}
147

    
148
/*
 * Generate one frame of dummy audio, encode it, and write the packet
 * to the media file.  Exits on encode or write failure.
 */
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    int out_size;
    AVPacket pkt;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    out_size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
    if (out_size < 0) {
        /* previously the negative error code was stored into pkt.size
           unchecked and handed to the muxer */
        fprintf(stderr, "Error while encoding audio frame\n");
        exit(1);
    }
    pkt.size = out_size;

    /* rescale the codec pts from codec time base to stream time base */
    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
172

    
173
/* Close the audio encoder and free the buffers allocated in open_audio(). */
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
    av_free(audio_outbuf);
}
180

    
181
/**************************************************************/
/* video output */

AVFrame *picture, *tmp_picture;      /* encoder input frame; YUV420P temp when converting */
uint8_t *video_outbuf;               /* buffer receiving the encoded video data */
int frame_count, video_outbuf_size;  /* frames written so far; size of video_outbuf */
187

    
188
/* add a video output stream */
189
/*
 * Create a new video stream on the output context and fill in the
 * default encoding parameters (bitrate, size, time base, GOP length,
 * pixel format) for the given codec id.
 */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVStream *st = av_new_stream(oc, 0);

    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    AVCodecContext *enc = st->codec;
    enc->codec_type = AVMEDIA_TYPE_VIDEO;
    enc->codec_id   = codec_id;

    /* sample parameters */
    enc->bit_rate = 400000;
    /* resolution must be a multiple of two */
    enc->width  = 352;
    enc->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    enc->time_base.num = 1;
    enc->time_base.den = STREAM_FRAME_RATE;
    enc->gop_size = 12; /* emit one intra frame every twelve frames at most */
    enc->pix_fmt  = STREAM_PIX_FMT;

    if (enc->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        enc->max_b_frames = 2;
    }
    if (enc->codec_id == CODEC_ID_MPEG1VIDEO) {
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        enc->mb_decision = 2;
    }
    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        enc->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
233

    
234
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
235
{
236
    AVFrame *picture;
237
    uint8_t *picture_buf;
238
    int size;
239

    
240
    picture = avcodec_alloc_frame();
241
    if (!picture)
242
        return NULL;
243
    size = avpicture_get_size(pix_fmt, width, height);
244
    picture_buf = av_malloc(size);
245
    if (!picture_buf) {
246
        av_free(picture);
247
        return NULL;
248
    }
249
    avpicture_fill((AVPicture *)picture, picture_buf,
250
                   pix_fmt, width, height);
251
    return picture;
252
}
253

    
254
/*
 * Open the video encoder for the stream and allocate the pictures and
 * the encoded-output buffer used by write_video_frame().  Exits on any
 * failure.
 */
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
        if (!video_outbuf) {
            /* was previously unchecked; a NULL here would crash the encoder */
            fprintf(stderr, "Could not allocate video output buffer\n");
            exit(1);
        }
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
305

    
306
/* prepare a dummy image */
307
/* prepare a dummy image: a moving gradient in the luma plane and a
   slowly shifting color pattern in the half-resolution chroma planes */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    const int i = frame_index;
    int x, y;

    /* Y plane: full resolution */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* Cb and Cr planes: quarter resolution (YUV420P subsampling) */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
328

    
329
/*
 * Produce the next video frame and write it to the media file.
 * Generates a dummy YUV420P image (converting via swscale when the
 * encoder wants another pixel format), then either stores the raw
 * picture (AVFMT_RAWPICTURE formats) or encodes it.  Exits on write
 * failure.  Keeps feeding the last picture past STREAM_NB_FRAMES so a
 * B-frame-delayed encoder can drain.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    /* conversion context is created lazily on first use and reused;
       static, so this function is not reentrant */
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        /* the muxer receives the AVPicture struct itself, not pixel data */
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            /* rescale the codec pts from codec time base to stream time base */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
405

    
406
/* Close the video encoder and free everything allocated in open_video():
   the main picture, the optional temporary YUV420P picture, and the
   encoded-output buffer. */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);
    av_free(picture->data[0]);   /* pixel buffer installed by avpicture_fill() */
    av_free(picture);
    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(video_outbuf);
}
417

    
418
/**************************************************************/
419
/* media file output */
420

    
421
int main(int argc, char **argv)
422
{
423
    const char *filename;
424
    AVOutputFormat *fmt;
425
    AVFormatContext *oc;
426
    AVStream *audio_st, *video_st;
427
    double audio_pts, video_pts;
428
    int i;
429

    
430
    /* initialize libavcodec, and register all codecs and formats */
431
    av_register_all();
432

    
433
    if (argc != 2) {
434
        printf("usage: %s output_file\n"
435
               "API example program to output a media file with libavformat.\n"
436
               "The output format is automatically guessed according to the file extension.\n"
437
               "Raw images can also be output by using '%%d' in the filename\n"
438
               "\n", argv[0]);
439
        exit(1);
440
    }
441

    
442
    filename = argv[1];
443

    
444
    /* auto detect the output format from the name. default is
445
       mpeg. */
446
    fmt = av_guess_format(NULL, filename, NULL);
447
    if (!fmt) {
448
        printf("Could not deduce output format from file extension: using MPEG.\n");
449
        fmt = av_guess_format("mpeg", NULL, NULL);
450
    }
451
    if (!fmt) {
452
        fprintf(stderr, "Could not find suitable output format\n");
453
        exit(1);
454
    }
455

    
456
    /* allocate the output media context */
457
    oc = avformat_alloc_context();
458
    if (!oc) {
459
        fprintf(stderr, "Memory error\n");
460
        exit(1);
461
    }
462
    oc->oformat = fmt;
463
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
464

    
465
    /* add the audio and video streams using the default format codecs
466
       and initialize the codecs */
467
    video_st = NULL;
468
    audio_st = NULL;
469
    if (fmt->video_codec != CODEC_ID_NONE) {
470
        video_st = add_video_stream(oc, fmt->video_codec);
471
    }
472
    if (fmt->audio_codec != CODEC_ID_NONE) {
473
        audio_st = add_audio_stream(oc, fmt->audio_codec);
474
    }
475

    
476
    /* set the output parameters (must be done even if no
477
       parameters). */
478
    if (av_set_parameters(oc, NULL) < 0) {
479
        fprintf(stderr, "Invalid output format parameters\n");
480
        exit(1);
481
    }
482

    
483
    dump_format(oc, 0, filename, 1);
484

    
485
    /* now that all the parameters are set, we can open the audio and
486
       video codecs and allocate the necessary encode buffers */
487
    if (video_st)
488
        open_video(oc, video_st);
489
    if (audio_st)
490
        open_audio(oc, audio_st);
491

    
492
    /* open the output file, if needed */
493
    if (!(fmt->flags & AVFMT_NOFILE)) {
494
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
495
            fprintf(stderr, "Could not open '%s'\n", filename);
496
            exit(1);
497
        }
498
    }
499

    
500
    /* write the stream header, if any */
501
    av_write_header(oc);
502

    
503
    for(;;) {
504
        /* compute current audio and video time */
505
        if (audio_st)
506
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
507
        else
508
            audio_pts = 0.0;
509

    
510
        if (video_st)
511
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
512
        else
513
            video_pts = 0.0;
514

    
515
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
516
            (!video_st || video_pts >= STREAM_DURATION))
517
            break;
518

    
519
        /* write interleaved audio and video frames */
520
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
521
            write_audio_frame(oc, audio_st);
522
        } else {
523
            write_video_frame(oc, video_st);
524
        }
525
    }
526

    
527
    /* write the trailer, if any.  the trailer must be written
528
     * before you close the CodecContexts open when you wrote the
529
     * header; otherwise write_trailer may try to use memory that
530
     * was freed on av_codec_close() */
531
    av_write_trailer(oc);
532

    
533
    /* close each codec */
534
    if (video_st)
535
        close_video(oc, video_st);
536
    if (audio_st)
537
        close_audio(oc, audio_st);
538

    
539
    /* free the streams */
540
    for(i = 0; i < oc->nb_streams; i++) {
541
        av_freep(&oc->streams[i]->codec);
542
        av_freep(&oc->streams[i]);
543
    }
544

    
545
    if (!(fmt->flags & AVFMT_NOFILE)) {
546
        /* close the output file */
547
        url_fclose(oc->pb);
548
    }
549

    
550
    /* free the stream */
551
    av_free(oc);
552

    
553
    return 0;
554
}