Statistics
| Branch: | Revision:

ffmpeg / libavformat / output-example.c @ a52ce57d

History | View | Annotate | Download (16.4 KB)

1
/*
2
 * Libavformat API example: Output a media file in any supported
3
 * libavformat format. The default codecs are used.
4
 *
5
 * Copyright (c) 2003 Fabrice Bellard
6
 *
7
 * Permission is hereby granted, free of charge, to any person obtaining a copy
8
 * of this software and associated documentation files (the "Software"), to deal
9
 * in the Software without restriction, including without limitation the rights
10
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11
 * copies of the Software, and to permit persons to whom the Software is
12
 * furnished to do so, subject to the following conditions:
13
 *
14
 * The above copyright notice and this permission notice shall be included in
15
 * all copies or substantial portions of the Software.
16
 *
17
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23
 * THE SOFTWARE.
24
 */
25
#include <stdlib.h>
26
#include <stdio.h>
27
#include <string.h>
28
#include <math.h>
29

    
30
#ifndef M_PI
31
#define M_PI 3.14159265358979323846
32
#endif
33

    
34
#include "libavformat/avformat.h"
35
#include "libswscale/swscale.h"
36

    
37
#undef exit
38

    
39
/* 5 seconds stream duration */
40
#define STREAM_DURATION   5.0
41
#define STREAM_FRAME_RATE 25 /* 25 images/s */
42
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
43
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
44

    
45
static int sws_flags = SWS_BICUBIC;
46

    
47
/**************************************************************/
48
/* audio output */
49

    
50
/* Signal-generator state and buffers shared by open_audio(),
   get_audio_frame(), write_audio_frame() and close_audio(). */
float t, tincr, tincr2;      /* sine phase, phase step, per-sample step increment */
int16_t *samples;            /* interleaved 16-bit input sample buffer */
uint8_t *audio_outbuf;       /* encoded-audio output buffer */
int audio_outbuf_size;       /* size of audio_outbuf in bytes */
int audio_input_frame_size;  /* samples per channel fed to the encoder per frame */
55

    
56
/*
57
 * add an audio output stream
58
 */
59
/*
 * Create a new audio stream in 'oc' and configure its codec context
 * with fixed sample parameters (64 kbit/s, 44.1 kHz, stereo).
 * Exits the program if the stream cannot be allocated.
 */
static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVStream *st;
    AVCodecContext *c;

    st = av_new_stream(oc, 1);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id   = codec_id;
    c->codec_type = CODEC_TYPE_AUDIO;

    /* sample parameters */
    c->bit_rate    = 64000;
    c->sample_rate = 44100;
    c->channels    = 2;

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
85

    
86
/*
 * Open the audio encoder for 'st', initialize the sine signal
 * generator, and allocate the output and input-sample buffers used by
 * write_audio_frame().  Exits the program on any failure.
 */
static void open_audio(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVCodec *codec;

    c = st->codec;

    /* find the audio encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    /* init signal generator */
    t = 0;
    tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;

    audio_outbuf_size = 10000;
    audio_outbuf = av_malloc(audio_outbuf_size);
    if (!audio_outbuf) {
        /* was previously unchecked: a NULL buffer would crash the encoder */
        fprintf(stderr, "Could not allocate audio output buffer\n");
        exit(1);
    }

    /* ugly hack for PCM codecs (will be removed ASAP with new PCM
       support to compute the input frame size in samples */
    if (c->frame_size <= 1) {
        audio_input_frame_size = audio_outbuf_size / c->channels;
        switch(st->codec->codec_id) {
        case CODEC_ID_PCM_S16LE:
        case CODEC_ID_PCM_S16BE:
        case CODEC_ID_PCM_U16LE:
        case CODEC_ID_PCM_U16BE:
            /* 16-bit formats: two bytes per sample, so halve the count */
            audio_input_frame_size >>= 1;
            break;
        default:
            break;
        }
    } else {
        audio_input_frame_size = c->frame_size;
    }
    /* 2 bytes per 16-bit sample, times channel count */
    samples = av_malloc(audio_input_frame_size * 2 * c->channels);
    if (!samples) {
        /* was previously unchecked: get_audio_frame() would write through NULL */
        fprintf(stderr, "Could not allocate audio sample buffer\n");
        exit(1);
    }
}
134

    
135
/* prepare a 16 bit dummy audio frame of 'frame_size' samples and
136
   'nb_channels' channels */
137
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
138
{
139
    int j, i, v;
140
    int16_t *q;
141

    
142
    q = samples;
143
    for(j=0;j<frame_size;j++) {
144
        v = (int)(sin(t) * 10000);
145
        for(i = 0; i < nb_channels; i++)
146
            *q++ = v;
147
        t += tincr;
148
        tincr += tincr2;
149
    }
150
}
151

    
152
/*
 * Generate one dummy audio frame, encode it, and send the packet to
 * the muxer.  Exits the program on encode or write failure.
 */
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt;
    int out_size;
    av_init_packet(&pkt);

    c = st->codec;

    get_audio_frame(samples, audio_input_frame_size, c->channels);

    out_size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
    if (out_size < 0) {
        /* previously a negative error code went straight into pkt.size */
        fprintf(stderr, "Error while encoding audio frame\n");
        exit(1);
    }
    pkt.size = out_size;

    /* rescale the codec pts into the stream time base */
    if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
        pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
    pkt.flags |= PKT_FLAG_KEY;
    pkt.stream_index= st->index;
    pkt.data= audio_outbuf;

    /* write the compressed frame in the media file */
    if (av_interleaved_write_frame(oc, &pkt) != 0) {
        fprintf(stderr, "Error while writing audio frame\n");
        exit(1);
    }
}
176

    
177
/* Close the audio codec and release the buffers allocated in open_audio(). */
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(audio_outbuf);
    av_free(samples);
}
184

    
185
/**************************************************************/
186
/* video output */
187

    
188
/* Video state shared by open_video(), write_video_frame() and close_video(). */
AVFrame *picture, *tmp_picture;  /* frame in codec pix_fmt; YUV420P scratch frame */
uint8_t *video_outbuf;           /* encoded-video output buffer */
int frame_count, video_outbuf_size;  /* frames emitted so far; outbuf size in bytes */
191

    
192
/* add a video output stream */
193
/*
 * Create a new video stream in 'oc' and configure its codec context:
 * 352x288 at STREAM_FRAME_RATE fps, 400 kbit/s, GOP of 12, with
 * codec-specific tweaks for MPEG-1/MPEG-2.  Exits on allocation failure.
 */
static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
{
    AVStream *st;
    AVCodecContext *c;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id   = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    /* sample parameters; the resolution must be a multiple of two */
    c->bit_rate = 400000;
    c->width    = 352;
    c->height   = 288;
    /* time base: the fundamental unit of time (in seconds) in terms of
       which frame timestamps are represented.  For fixed-fps content the
       timebase is 1/framerate and timestamp increments are exactly 1. */
    c->time_base.num = 1;
    c->time_base.den = STREAM_FRAME_RATE;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt  = STREAM_PIX_FMT;

    switch (c->codec_id) {
    case CODEC_ID_MPEG2VIDEO:
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
        break;
    case CODEC_ID_MPEG1VIDEO:
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision = 2;
        break;
    default:
        break;
    }

    /* some formats want stream headers to be separate */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}
237

    
238
static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
239
{
240
    AVFrame *picture;
241
    uint8_t *picture_buf;
242
    int size;
243

    
244
    picture = avcodec_alloc_frame();
245
    if (!picture)
246
        return NULL;
247
    size = avpicture_get_size(pix_fmt, width, height);
248
    picture_buf = av_malloc(size);
249
    if (!picture_buf) {
250
        av_free(picture);
251
        return NULL;
252
    }
253
    avpicture_fill((AVPicture *)picture, picture_buf,
254
                   pix_fmt, width, height);
255
    return picture;
256
}
257

    
258
/*
 * Open the video encoder for 'st' and allocate the pictures and output
 * buffer used by write_video_frame().  A YUV420P scratch picture is
 * allocated only when the codec pixel format differs from YUV420P.
 * Exits the program on any failure.
 */
static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
        if (!video_outbuf) {
            /* was previously unchecked: the encoder would write through NULL */
            fprintf(stderr, "Could not allocate video output buffer\n");
            exit(1);
        }
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}
309

    
310
/* prepare a dummy image */
311
/* Paint a moving synthetic test pattern into 'pict', which must hold a
   YUV420P image of the given dimensions; 'frame_index' animates it. */
static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
{
    int x, y;
    const int i = frame_index;

    /* luma plane: diagonal gradient drifting with the frame index */
    for (y = 0; y < height; y++)
        for (x = 0; x < width; x++)
            pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;

    /* chroma planes: subsampled by two in each direction for 4:2:0 */
    for (y = 0; y < height / 2; y++) {
        for (x = 0; x < width / 2; x++) {
            pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
            pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
        }
    }
}
332

    
333
/*
 * Generate the next synthetic picture, encode it (or pass it raw when
 * the container accepts AVFMT_RAWPICTURE), and send the packet to the
 * muxer.  Once STREAM_NB_FRAMES is reached the same picture is
 * re-submitted so codecs with B-frame latency can flush delayed frames.
 * Exits the program on encode or write failure.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;  /* lazily created, reused across calls */

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        if (out_size < 0) {
            /* previously a negative error code fell into the "buffered"
               branch and was silently ignored */
            fprintf(stderr, "Error while encoding video frame\n");
            exit(1);
        }
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
409

    
410
/* Close the video codec and release the pictures and output buffer
   allocated in open_video(). */
static void close_video(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    if (tmp_picture) {
        av_free(tmp_picture->data[0]);
        av_free(tmp_picture);
    }
    av_free(picture->data[0]);
    av_free(picture);
    av_free(video_outbuf);
}
421

    
422
/**************************************************************/
423
/* media file output */
424

    
425
int main(int argc, char **argv)
426
{
427
    const char *filename;
428
    AVOutputFormat *fmt;
429
    AVFormatContext *oc;
430
    AVStream *audio_st, *video_st;
431
    double audio_pts, video_pts;
432
    int i;
433

    
434
    /* initialize libavcodec, and register all codecs and formats */
435
    av_register_all();
436

    
437
    if (argc != 2) {
438
        printf("usage: %s output_file\n"
439
               "API example program to output a media file with libavformat.\n"
440
               "The output format is automatically guessed according to the file extension.\n"
441
               "Raw images can also be output by using '%%d' in the filename\n"
442
               "\n", argv[0]);
443
        exit(1);
444
    }
445

    
446
    filename = argv[1];
447

    
448
    /* auto detect the output format from the name. default is
449
       mpeg. */
450
    fmt = av_guess_format(NULL, filename, NULL);
451
    if (!fmt) {
452
        printf("Could not deduce output format from file extension: using MPEG.\n");
453
        fmt = av_guess_format("mpeg", NULL, NULL);
454
    }
455
    if (!fmt) {
456
        fprintf(stderr, "Could not find suitable output format\n");
457
        exit(1);
458
    }
459

    
460
    /* allocate the output media context */
461
    oc = avformat_alloc_context();
462
    if (!oc) {
463
        fprintf(stderr, "Memory error\n");
464
        exit(1);
465
    }
466
    oc->oformat = fmt;
467
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
468

    
469
    /* add the audio and video streams using the default format codecs
470
       and initialize the codecs */
471
    video_st = NULL;
472
    audio_st = NULL;
473
    if (fmt->video_codec != CODEC_ID_NONE) {
474
        video_st = add_video_stream(oc, fmt->video_codec);
475
    }
476
    if (fmt->audio_codec != CODEC_ID_NONE) {
477
        audio_st = add_audio_stream(oc, fmt->audio_codec);
478
    }
479

    
480
    /* set the output parameters (must be done even if no
481
       parameters). */
482
    if (av_set_parameters(oc, NULL) < 0) {
483
        fprintf(stderr, "Invalid output format parameters\n");
484
        exit(1);
485
    }
486

    
487
    dump_format(oc, 0, filename, 1);
488

    
489
    /* now that all the parameters are set, we can open the audio and
490
       video codecs and allocate the necessary encode buffers */
491
    if (video_st)
492
        open_video(oc, video_st);
493
    if (audio_st)
494
        open_audio(oc, audio_st);
495

    
496
    /* open the output file, if needed */
497
    if (!(fmt->flags & AVFMT_NOFILE)) {
498
        if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0) {
499
            fprintf(stderr, "Could not open '%s'\n", filename);
500
            exit(1);
501
        }
502
    }
503

    
504
    /* write the stream header, if any */
505
    av_write_header(oc);
506

    
507
    for(;;) {
508
        /* compute current audio and video time */
509
        if (audio_st)
510
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
511
        else
512
            audio_pts = 0.0;
513

    
514
        if (video_st)
515
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
516
        else
517
            video_pts = 0.0;
518

    
519
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
520
            (!video_st || video_pts >= STREAM_DURATION))
521
            break;
522

    
523
        /* write interleaved audio and video frames */
524
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
525
            write_audio_frame(oc, audio_st);
526
        } else {
527
            write_video_frame(oc, video_st);
528
        }
529
    }
530

    
531
    /* write the trailer, if any.  the trailer must be written
532
     * before you close the CodecContexts open when you wrote the
533
     * header; otherwise write_trailer may try to use memory that
534
     * was freed on av_codec_close() */
535
    av_write_trailer(oc);
536

    
537
    /* close each codec */
538
    if (video_st)
539
        close_video(oc, video_st);
540
    if (audio_st)
541
        close_audio(oc, audio_st);
542

    
543
    /* free the streams */
544
    for(i = 0; i < oc->nb_streams; i++) {
545
        av_freep(&oc->streams[i]->codec);
546
        av_freep(&oc->streams[i]);
547
    }
548

    
549
    if (!(fmt->flags & AVFMT_NOFILE)) {
550
        /* close the output file */
551
        url_fclose(oc->pb);
552
    }
553

    
554
    /* free the stream */
555
    av_free(oc);
556

    
557
    return 0;
558
}