/* chunker-player / chunker_streamer / chunker_streamer.c @ daaa68ea */
/*
 *  Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
 *  Copyright (c) 2010-2011 Csaba Kiraly
 *  developed for the Napa-Wine EU project. See www.napa-wine.eu
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include "chunker_streamer.h"
#include <signal.h>
#include <math.h>
#include <getopt.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>

#ifdef USE_AVFILTER
#include <libavfilter/avfilter.h>
#include "chunker_filtering.h"
#endif

#include "chunk_pusher.h"

struct outstream {
	struct output *output;
	ExternalChunk *chunk;
};
struct outstream outstream;

#define DEBUG
#define DEBUG_AUDIO_FRAMES  false
#define DEBUG_VIDEO_FRAMES  false
#define DEBUG_CHUNKER false
#define DEBUG_ANOMALIES true
#define DEBUG_TIMESTAMPING false
#include "dbg.h"

#define STREAMER_MAX(a,b) (((a)>(b))?(a):(b))
#define STREAMER_MIN(a,b) (((a)<(b))?(a):(b))

//#define DISPLAY_PSNR
#define GET_PSNR(x) (((x)==0) ? 0 : (-10.0*log(x)/log(10)))
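/*
 * Note on GET_PSNR: the macro expects a mean squared error that is already
 * normalized by 255^2 (the DISPLAY_PSNR block further down passes
 * coded_frame->error[0]/(width*height*255.0*255.0)), so -10*log10(x) is the
 * usual 10*log10(255^2/MSE). Minimal sketch of the intended use, not part of
 * the build:
 */
#if 0
static double example_psnr(void)
{
	double normalized_mse = 1e-4;        /* e.g. error[0]/(w*h*255.0*255.0) */
	return GET_PSNR(normalized_mse);     /* -10*log10(1e-4) = 40 dB */
}
#endif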

    
ChunkerMetadata *cmeta = NULL;
int seq_current_chunk = 1; //chunk numbering starts from 1; HINT do i need more bytes?

#define AUDIO_CHUNK 0
#define VIDEO_CHUNK 1

void SaveFrame(AVFrame *pFrame, int width, int height);
void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf);
int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf);
void bit32_encoded_push(uint32_t v, uint8_t *p);

int video_record_count = 0;
int savedVideoFrames = 0;
long int firstSavedVideoFrame = 0;
int ChunkerStreamerTestMode = 0;

int pts_anomaly_threshold = -1;
int newtime_anomaly_threshold = -1;
bool timebank = false;
char *outside_world_url = NULL;

int gop_size = 25;
int max_b_frames = 3;
bool vcopy = false;

long delay_audio = 0; //delay audio by x millisec

char *avfilter="yadif";

// Constant number of frames per chunk
int chunkFilledFramesStrategy(ExternalChunk *echunk, int chunkType)
{
	dcprintf(DEBUG_CHUNKER, "CHUNKER: check if frames num %d == %d in chunk %d\n", echunk->frames_num, cmeta->framesPerChunk[chunkType], echunk->seq);
	if(echunk->frames_num == cmeta->framesPerChunk[chunkType])
		return 1;

	return 0;
}

// Constant size. Note that for now each chunk will have a size just greater than or equal to the required value,
// so it can be considered roughly constant size.
int chunkFilledSizeStrategy(ExternalChunk *echunk, int chunkType)
{
	dcprintf(DEBUG_CHUNKER, "CHUNKER: check if chunk size %d >= %d in chunk %d\n", echunk->payload_len, cmeta->targetChunkSize, echunk->seq);
	if(echunk->payload_len >= cmeta->targetChunkSize)
		return 1;

	return 0;
}

// Performance optimization.
// The chunkFilled function has been split into two functions (one for each strategy).
// Instead of continuously checking the strategy flag (which is constant),
// we change the callback just once according to the current strategy (see the switch statement in main where this function pointer is set).
int (*chunkFilled)(ExternalChunk *echunk, int chunkType);
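/*
 * Illustrative sketch (not compiled in): how the chunkFilled callback is
 * wired and used. The selection happens once, in the switch on
 * cmeta->strategy in main(); addFrameToOutstream() then calls the callback
 * once per frame.
 */
#if 0
static void example_chunkFilled_usage(ExternalChunk *chunk, int strategy)
{
	/* select the strategy once (mirrors the switch in main()) */
	chunkFilled = (strategy == 1) ? chunkFilledSizeStrategy : chunkFilledFramesStrategy;

	/* later, once per frame (mirrors addFrameToOutstream()) */
	if(chunkFilled(chunk, VIDEO_CHUNK)) {
		/* the chunk is complete according to the current strategy: send it */
	}
}
#endif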

void initChunk(ExternalChunk *chunk, int *seq_num) {
	chunk->seq = (*seq_num)++;
	chunk->frames_num = 0;
	chunk->payload_len = 0;
	chunk->len=0;
	if(chunk->data != NULL)
		free(chunk->data);
	chunk->data = NULL;
	chunk->start_time.tv_sec = -1;
	chunk->start_time.tv_usec = -1;
	chunk->end_time.tv_sec = -1;
	chunk->end_time.tv_usec = -1;
	chunk->priority = 0;
	chunk->category = 0;
	chunk->_refcnt = 0;
}

int quit = 0;

void sigproc()
{
	printf("you have pressed ctrl-c, terminating...\n");
	quit = 1;
}

static void print_usage(int argc, char *argv[])
{
  fprintf (stderr,
    "\nUsage:%s [options]\n"
    "\n"
    "Mandatory options:\n"
    "\t[-i input file]\n"
    "\t[-a audio bitrate]\n"
    "\t[-v video bitrate]\n\n"
    "Other options:\n"
    "\t[-F output] (overrides config file)\n"
    "\t[-A audioencoder]\n"
    "\t[-V videoencoder]\n"
    "\t[-s WxH]: force video size.\n"
    "\t[-l]: this is a live stream.\n"
    "\t[-d ms]: delay audio by <ms> milliseconds.\n"
    "\t[-o]: adjust A/V frame timestamps (default off, use it only with flawed containers)\n"
    "\t[-p]: pts anomaly threshold (default: -1=off).\n"
    "\t[-q]: sync anomaly threshold (default: -1=off).\n"
    "\t[-t]: QoE test mode\n\n"

    "\t[--video_stream]: set video_stream ID in input\n"
    "\t[--audio_stream]: set audio_stream ID in input\n"
    "\t[--avfilter]: set input filter (default: yadif)\n"
    "\n"
    "Codec options:\n"
    "\t[-g GOP]: gop size\n"
    "\t[-b frames]: max number of consecutive b frames\n"
    "\t[-x extras]: extra video codec options (e.g. -x me_method=hex,flags2=+dct8x8+wpred+bpyramid+mixed_refs)\n"
    "\n"
    "=======================================================\n", argv[0]
    );
}

int sendChunk(struct output *output, ExternalChunk *chunk) {
#ifdef HTTPIO
	return pushChunkHttp(chunk, outside_world_url);
#endif
#ifdef TCPIO
	return pushChunkTcp(output, chunk);
#endif
#ifdef UDPIO
	return pushChunkUDP(chunk);
#endif
}


170
int transcodeFrame(uint8_t *video_outbuf, int video_outbuf_size, int64_t *target_pts, AVFrame *pFrame, AVRational time_base, AVCodecContext *pCodecCtx, AVCodecContext *pCodecCtxEnc)
171
{
172
        int video_frame_size = 0;
173
        AVFrame *pFrame2 = NULL;
174
        AVFrame *scaledFrame = NULL;
175
        pFrame2=avcodec_alloc_frame();
176
        scaledFrame=avcodec_alloc_frame();
177
        if(pFrame2==NULL || scaledFrame==NULL) {
178
                fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
179
                if(pFrame2) av_free(pFrame2);
180
                if(scaledFrame) av_free(scaledFrame);
181
                return -1;
182
        }
183
        int scaledFrame_buf_size = avpicture_get_size( PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);
184
        uint8_t* scaledFrame_buffer = (uint8_t *) av_malloc( scaledFrame_buf_size * sizeof( uint8_t ) );
185
        avpicture_fill( (AVPicture*) scaledFrame, scaledFrame_buffer, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);
186
        if(!video_outbuf || !scaledFrame_buffer) {
187
                fprintf(stderr, "INIT: Memory error alloc video_outbuf!!!\n");
188
                return -1;
189
        }
190

    
191
                                            if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
192
                                                pFrame->pts = av_rescale_q(pFrame->pkt_pts, time_base, pCodecCtxEnc->time_base);
193
                                            } else {        //try to figure out the pts //TODO: review this
194
                                                if (pFrame->pkt_dts != AV_NOPTS_VALUE) {
195
                                                        pFrame->pts = av_rescale_q(pFrame->pkt_dts, time_base, pCodecCtxEnc->time_base);
196
                                                }
197
                                            }
198

    
199
#ifdef VIDEO_DEINTERLACE
200
                                            avpicture_deinterlace(
201
                                                        (AVPicture*) pFrame,
202
                                                        (const AVPicture*) pFrame,
203
                                                        pCodecCtxEnc->pix_fmt,
204
                                                        pCodecCtxEnc->width,
205
                                                        pCodecCtxEnc->height);
206
#endif
207

    
208
#ifdef USE_AVFILTER
209
                                            //apply avfilters
210
                                            filter(pFrame,pFrame2);
211
                                            pFrame = pFrame2;
212
                                            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
213
                                            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
214
#endif
215

    
216
                                            if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
217
//                                                static AVPicture pict;
218
                                                static struct SwsContext *img_convert_ctx = NULL;
219

    
220
                                                pFrame->pict_type = 0;
221
                                                if(img_convert_ctx == NULL)
222
                                                {
223
                                                        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
224
                                                        if(img_convert_ctx == NULL) {
225
                                                                fprintf(stderr, "Cannot initialize the conversion context!\n");
226
                                                                exit(1);
227
                                                        }
228
                                                }
229
                                                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, scaledFrame->data, scaledFrame->linesize);
230
                                                scaledFrame->pts = pFrame->pts;
231
                                                scaledFrame->pict_type = 0;
232
                                                video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, scaledFrame);
233
                                            } else {
234
                                                pFrame->pict_type = 0;
235
                                                video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, pFrame);
236
                                            }
237

    
238
                                            //use pts if dts is invalid
239
                                            if(pCodecCtxEnc->coded_frame->pts!=AV_NOPTS_VALUE)
240
                                                *target_pts = av_rescale_q(pCodecCtxEnc->coded_frame->pts, pCodecCtxEnc->time_base, time_base);
241
                                            else {        //TODO: review this
242
                                                if(pFrame2) av_free(pFrame2);
243
                                                if(scaledFrame) av_free(scaledFrame);
244
                                                if(scaledFrame_buffer) av_free(scaledFrame_buffer);
245
                                                return -1;
246
                                            }
247

    
248
                                        if(video_frame_size > 0) {
249
                                            if(pCodecCtxEnc->coded_frame) {
250
                                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pCodecCtxEnc->coded_frame->pkt_dts, pCodecCtxEnc->coded_frame->pkt_pts, pCodecCtxEnc->coded_frame->pts);
251
                                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: outtype: %d%s\n", pCodecCtxEnc->coded_frame->pict_type, pCodecCtxEnc->coded_frame->key_frame ? " (key)" : "");
252
                                            }
253
#ifdef DISPLAY_PSNR
254
                                            static double ist_psnr = 0;
255
                                            static double cum_psnr = 0;
256
                                            static int psnr_samples = 0;
257
                                            if(pCodecCtxEnc->coded_frame) {
258
                                                if(pCodecCtxEnc->flags&CODEC_FLAG_PSNR) {
259
                                                        ist_psnr = GET_PSNR(pCodecCtxEnc->coded_frame->error[0]/(pCodecCtxEnc->width*pCodecCtxEnc->height*255.0*255.0));
260
                                                        psnr_samples++;
261
                                                        cum_psnr += ist_psnr;
262
                                                        fprintf(stderr, "PSNR: ist %.4f avg: %.4f\n", ist_psnr, cum_psnr / (double)psnr_samples);
263
                                                }
264
                                            }
265
#endif
266
                                        }
267

    
268
        if(pFrame2) av_free(pFrame2);
269
        if(scaledFrame) av_free(scaledFrame);
270
        if(scaledFrame_buffer) av_free(scaledFrame_buffer);
271
        return video_frame_size;
272
}
273
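/*
 * Note on the timestamp rescaling done in transcodeFrame() above:
 * av_rescale_q(a, bq, cq) returns a * bq / cq computed without overflow,
 * i.e. it converts a tick count from time base bq to time base cq.
 * Minimal sketch, not part of the build; the 90 kHz input time base is
 * just an example value:
 */
#if 0
static void example_rescale(void)
{
	AVRational stream_tb  = {1, 90000};  /* e.g. an MPEG-TS style input time base */
	AVRational encoder_tb = {1, 25};     /* e.g. a 25 fps encoder time base */

	int64_t in_pts  = 90000;                                        /* 1 second in stream ticks */
	int64_t enc_pts = av_rescale_q(in_pts, stream_tb, encoder_tb);  /* -> 25 encoder ticks */
	int64_t back    = av_rescale_q(enc_pts, encoder_tb, stream_tb); /* -> 90000 stream ticks */
	(void)back;
}
#endif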

    
274

    
275
void createFrame(struct Frame *frame, long long newTime, int video_frame_size, int pict_type)
276
{
277

    
278
                                        frame->timestamp.tv_sec = (long long)newTime/1000;
279
                                        frame->timestamp.tv_usec = newTime%1000;
280
                                        frame->size = video_frame_size;
281
                                        /* pict_type may be 1 (I), 2 (P), 3 (B), 5 (AUDIO) */
282
                                        frame->type = pict_type;
283

    
284

    
285
/* should be on some other place
286
//                                        if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: original codec frame number %d vs. encoded %d vs. packed %d\n", pCodecCtx->frame_number, pCodecCtxEnc->frame_number, frame->number);
287
//                                        if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: duration %d timebase %d %d container timebase %d\n", (int)packet.duration, pCodecCtxEnc->time_base.den, pCodecCtxEnc->time_base.num, pCodecCtx->time_base.den);
288

289
#ifdef YUV_RECORD_ENABLED
290
                                        if(!vcopy && ChunkerStreamerTestMode)
291
                                        {
292
                                                if(videotrace)
293
                                                        fprintf(videotrace, "%d %d %d\n", frame->number, pict_type, frame->size);
294

295
                                                SaveFrame(pFrame, dest_width, dest_height);
296

297
                                                ++savedVideoFrames;
298
                                                SaveEncodedFrame(frame, video_outbuf);
299

300
                                                if(!firstSavedVideoFrame)
301
                                                        firstSavedVideoFrame = frame->number;
302

303
                                                char tmp_filename[255];
304
                                                sprintf(tmp_filename, "yuv_data/streamer_out_context.txt");
305
                                                FILE* tmp = fopen(tmp_filename, "w");
306
                                                if(tmp)
307
                                                {
308
                                                        fprintf(tmp, "width = %d\nheight = %d\ntotal_frames_saved = %d\ntotal_frames_decoded = %d\nfirst_frame_number = %ld\nlast_frame_number = %d\n"
309
                                                                ,dest_width, dest_height
310
                                                                ,savedVideoFrames, savedVideoFrames, firstSavedVideoFrame, frame->number);
311
                                                        fclose(tmp);
312
                                                }
313
                                        }
314
#endif
315
*/
316

    
317
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: encapsulated frame size:%d type:%d\n", frame->size, frame->type);
318
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: timestamped sec %ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
319
}
320

    
321

    
322
void addFrameToOutstream(struct outstream *os, Frame *frame, uint8_t *video_outbuf)
323
{
324

    
325
        ExternalChunk *chunk = os->chunk;
326
        struct output *output = os->output;
327

    
328
                                        if(update_chunk(chunk, frame, video_outbuf) == -1) {
329
                                                fprintf(stderr, "VIDEO: unable to update chunk %d. Exiting.\n", chunk->seq);
330
                                                exit(-1);
331
                                        }
332

    
333
                                        if(chunkFilled(chunk, VIDEO_CHUNK)) { // is chunk filled using current strategy?
334
                                                //calculate priority
335
                                                chunk->priority /= chunk->frames_num;
336

    
337
                                                //SAVE ON FILE
338
                                                //saveChunkOnFile(chunk);
339
                                                //Send the chunk to an external transport/player
340
                                                sendChunk(output, chunk);
341
                                                dctprintf(DEBUG_CHUNKER, "VIDEO: sent chunk video %d, prio:%f, size %d\n", chunk->seq, chunk->priority, chunk->len);
342
                                                chunk->seq = 0; //signal that we need an increase
343
                                                //initChunk(chunk, &seq_current_chunk);
344
                                        }
345
}
346

    
347
int main(int argc, char *argv[]) {
348
        signal(SIGINT, sigproc);
349
        
350
        int i=0;
351

    
352
        //output variables
353
        uint8_t *video_outbuf = NULL;
354
        int video_outbuf_size, video_frame_size;
355
        uint8_t *audio_outbuf = NULL;
356
        int audio_outbuf_size, audio_frame_size;
357
        int audio_data_size;
358

    
359
        //numeric identifiers of input streams
360
        int videoStream = -1;
361
        int audioStream = -1;
362

    
363
//        int len1;
364
        int frameFinished;
365
        //frame sequential counters
366
        int contFrameAudio=1, contFrameVideo=0;
367
//        int numBytes;
368

    
369
        //command line parameters
370
        int audio_bitrate = -1;
371
        int video_bitrate = -1;
372
        char *audio_codec = "mp2";
373
        char *video_codec = "mpeg4";
374
        char *codec_options = "";
375
        int live_source = 0; //tells whether to sleep before reading the next frame when the source is not live (i.e. a file)
376
        int offset_av = 0; //tells whether to compensate for the offset between audio and video in the file
377
        
378
        //a raw buffer for decoded uncompressed audio samples
379
        int16_t *samples = NULL;
380
        //a raw uncompressed video picture
381
        AVFrame *pFrame1 = NULL;
382

    
383
        AVFormatContext *pFormatCtx;
384
        AVCodecContext  *pCodecCtx = NULL ,*pCodecCtxEnc = NULL ,*aCodecCtxEnc = NULL ,*aCodecCtx = NULL;
385
        AVCodec         *pCodec = NULL ,*pCodecEnc = NULL ,*aCodec = NULL ,*aCodecEnc = NULL;
386
        AVPacket         packet;
387

    
388
        //stuff needed to compute the right timestamps
389
        short int FirstTimeAudio=1, FirstTimeVideo=1;
390
        short int pts_anomalies_counter=0;
391
        short int newtime_anomalies_counter=0;
392
        long long newTime=0, newTime_audio=0, newTime_video=0, newTime_prev=0;
393
        struct timeval lastAudioSent = {0, 0};
394
        int64_t ptsvideo1=0;
395
        int64_t ptsaudio1=0;
396
        int64_t last_pkt_dts=0, delta_video=0, delta_audio=0, last_pkt_dts_audio=0, target_pts=0;
397

    
398
        //Napa-Wine specific Frame and Chunk structures for transport
399
        Frame *frame = NULL;
400
        ExternalChunk *chunkaudio = NULL;
401
        
402
        char av_input[1024];
403
        int dest_width = -1;
404
        int dest_height = -1;
405
        
406
        static struct option long_options[] =
407
        {
408
                {"audio_stream", required_argument, 0, 0},
409
                {"video_stream", required_argument, 0, 0},
410
                {"avfilter", required_argument, 0, 0},
411
                {0, 0, 0, 0}
412
        };
413
        /* `getopt_long' stores the option index here. */
414
        int option_index = 0, c;
415
        int mandatories = 0;
416
        while ((c = getopt_long (argc, argv, "i:a:v:A:V:s:lop:q:tF:g:b:d:x:", long_options, &option_index)) != -1)
417
        {
418
                switch (c) {
419
                        case 0: //for long options
420
                                if( strcmp( "audio_stream", long_options[option_index].name ) == 0 ) { audioStream = atoi(optarg); }
421
                                if( strcmp( "video_stream", long_options[option_index].name ) == 0 ) { videoStream = atoi(optarg); }
422
                                if( strcmp( "avfilter", long_options[option_index].name ) == 0 ) { avfilter = strdup(optarg); }
423
                                break;
424
                        case 'i':
425
                                snprintf(av_input, sizeof(av_input), "%s", optarg);
426
                                mandatories++;
427
                                break;
428
                        case 'a':
429
                                sscanf(optarg, "%d", &audio_bitrate);
430
                                mandatories++;
431
                                break;
432
                        case 'v':
433
                                sscanf(optarg, "%d", &video_bitrate);
434
                                mandatories++;
435
                                break;
436
                        case 'A':
437
                                audio_codec = strdup(optarg);
438
                                break;
439
                        case 'V':
440
                                video_codec = strdup(optarg);
441
                                break;
442
                        case 's':
443
                                sscanf(optarg, "%dx%d", &dest_width, &dest_height);
444
                                break;
445
                        case 'l':
446
                                live_source = 1;
447
                                break;
448
                        case 'o':
449
                                offset_av = 1;
450
                                break;
451
                        case 't':
452
                                ChunkerStreamerTestMode = 1;
453
                                break;
454
                        case 'p':
455
                                sscanf(optarg, "%d", &pts_anomaly_threshold);
456
                                break;
457
                        case 'q':
458
                                sscanf(optarg, "%d", &newtime_anomaly_threshold);
459
                                break;
460
                        case 'F':
461
                                outside_world_url = strdup(optarg);
462
                                break;
463
                        case 'g':
464
                                sscanf(optarg, "%d", &gop_size);
465
                                break;
466
                        case 'b':
467
                                sscanf(optarg, "%d", &max_b_frames);
468
                                break;
469
                        case 'd':
470
                                sscanf(optarg, "%ld", &delay_audio);
471
                                break;
472
                        case 'x':
473
                                codec_options = strdup(optarg);
474
                                break;
475
                        default:
476
                                print_usage(argc, argv);
477
                                return -1;
478
                }
479
        }
480
        
481
        if(mandatories < 3) 
482
        {
483
                print_usage(argc, argv);
484
                return -1;
485
        }
486
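        /*
         * Illustrative invocation (example values only; the binary name depends on the build):
         *   ./chunker_streamer -i input.mpg -a 64000 -v 1000000 -F tcp://127.0.0.1:6000
         * -i, -a and -v are the three mandatory options checked above; -F
         * overrides the output URL from the config file and is parsed below as
         * tcp:// or udp:// depending on how the streamer was built.
         */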

    
487
#ifdef YUV_RECORD_ENABLED
488
        if(ChunkerStreamerTestMode)
489
        {
490
                DELETE_DIR("yuv_data");
491
                CREATE_DIR("yuv_data");
492
                //FILE* pFile=fopen("yuv_data/streamer_out.yuv", "w");
493
                //fclose(pFile);
494
        }
495
#endif
496

    
497
restart:
498
        // read the configuration file
499
        cmeta = chunkerInit();
500
        if (!outside_world_url) {
501
                outside_world_url = strdup(cmeta->outside_world_url);
502
        }
503
        switch(cmeta->strategy)
504
        {
505
                case 1:
506
                        chunkFilled = chunkFilledSizeStrategy;
507
                        break;
508
                default:
509
                        chunkFilled = chunkFilledFramesStrategy;
510
        }
511
                
512
        if(live_source)
513
                fprintf(stderr, "INIT: Using LIVE SOURCE TimeStamps\n");
514
        if(offset_av)
515
                fprintf(stderr, "INIT: Compensating AV OFFSET in file\n");
516

    
517
        // Register all formats and codecs
518
        av_register_all();
519

    
520
        // Open input file
521
        if(av_open_input_file(&pFormatCtx, av_input, NULL, 0, NULL) != 0) {
522
                fprintf(stdout, "INIT: Couldn't open video file. Exiting.\n");
523
                exit(-1);
524
        }
525

    
526
        // Retrieve stream information
527
        if(av_find_stream_info(pFormatCtx) < 0) {
528
                fprintf(stdout, "INIT: Couldn't find stream information. Exiting.\n");
529
                exit(-1);
530
        }
531

    
532
        // Dump information about file onto standard error
533
        av_dump_format(pFormatCtx, 0, av_input, 0);
534

    
535
        // Find the video and audio stream numbers
536
        for(i=0; i<pFormatCtx->nb_streams; i++) {
537
                if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream<0) {
538
                        videoStream=i;
539
                }
540
                if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream<0) {
541
                        audioStream=i;
542
                }
543
        }
544

    
545
        if(videoStream==-1 || audioStream==-1) {        // TODO: refine to work with 1 or the other
546
                fprintf(stdout, "INIT: Didn't find audio and video streams. Exiting.\n");
547
                exit(-1);
548
        }
549

    
550
        fprintf(stderr, "INIT: Num streams : %d TBR: %d %d RFRAMERATE:%d %d Duration:%ld\n", pFormatCtx->nb_streams, pFormatCtx->streams[videoStream]->time_base.num, pFormatCtx->streams[videoStream]->time_base.den, pFormatCtx->streams[videoStream]->r_frame_rate.num, pFormatCtx->streams[videoStream]->r_frame_rate.den, (long int)pFormatCtx->streams[videoStream]->duration);
551

    
552
        fprintf(stderr, "INIT: Video stream has id : %d\n",videoStream);
553
        fprintf(stderr, "INIT: Audio stream has id : %d\n",audioStream);
554

    
555

    
556
        // Get a pointer to the codec context for the input video stream
557
        pCodecCtx=pFormatCtx->streams[videoStream]->codec;
558
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
559
        //extract W and H
560
        fprintf(stderr, "INIT: Width:%d Height:%d\n", pCodecCtx->width, pCodecCtx->height);
561

    
562
        // Get a pointer to the codec context for the input audio stream
563
        if(audioStream != -1) {
564
                aCodecCtx=pFormatCtx->streams[audioStream]->codec;
565
                fprintf(stderr, "INIT: AUDIO Codecid: %d channels %d samplerate %d\n", aCodecCtx->codec_id, aCodecCtx->channels, aCodecCtx->sample_rate);
566
        }
567

    
568
        // Figure out size
569
        dest_width = (dest_width > 0) ? dest_width : pCodecCtx->width;
570
        dest_height = (dest_height > 0) ? dest_height : pCodecCtx->height;
571

    
572
        //setup video output encoder
573
 if (strcmp(video_codec, "copy") == 0) {
574
        vcopy = true;
575
 } else {
576
        pCodecEnc = avcodec_find_encoder_by_name(video_codec);
577
        if (pCodecEnc) {
578
                fprintf(stderr, "INIT: Setting VIDEO codecID to: %d\n",pCodecEnc->id);
579
        } else {
580
                fprintf(stderr, "INIT: Unknown OUT VIDEO codec: %s!\n", video_codec);
581
                return -1; // Codec not found
582
        }
583

    
584
        pCodecCtxEnc=avcodec_alloc_context();
585
        pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;
586
        pCodecCtxEnc->codec_id = pCodecEnc->id;
587

    
588
        pCodecCtxEnc->bit_rate = video_bitrate;
589
        //~ pCodecCtxEnc->qmin = 30;
590
        //~ pCodecCtxEnc->qmax = 30;
591
        //times 20 follows the defaults, was not needed in previous versions of libavcodec
592
//        pCodecCtxEnc->crf = 20.0f;
593
        // resolution must be a multiple of two 
594
        pCodecCtxEnc->width = dest_width;
595
        pCodecCtxEnc->height = dest_height;
596
        // frames per second 
597
        //~ pCodecCtxEnc->time_base= pCodecCtx->time_base;//(AVRational){1,25};
598
        //printf("pCodecCtx->time_base=%d/%d\n", pCodecCtx->time_base.num, pCodecCtx->time_base.den);
599
        pCodecCtxEnc->time_base= pCodecCtx->time_base;//(AVRational){1,25};
600
        pCodecCtxEnc->gop_size = gop_size; // emit one intra frame every gop_size frames 
601
        pCodecCtxEnc->max_b_frames = max_b_frames;
602
        pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P;
603
        pCodecCtxEnc->flags |= CODEC_FLAG_PSNR;
604
        //~ pCodecCtxEnc->flags |= CODEC_FLAG_QSCALE;
605

    
606
        //some generic quality tuning
607
        pCodecCtxEnc->mb_decision = FF_MB_DECISION_RD;
608

    
609
        //some rate control parameters for streaming, taken from ffserver.c
610
        {
611
        /* Bitrate tolerance is less for streaming */
612
        AVCodecContext *av = pCodecCtxEnc;
613
        if (av->bit_rate_tolerance == 0)
614
            av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
615
                      (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
616
        //if (av->qmin == 0)
617
        //    av->qmin = 3;
618
        //if (av->qmax == 0)
619
        //    av->qmax = 31;
620
        //if (av->max_qdiff == 0)
621
        //    av->max_qdiff = 3;
622
        //av->qcompress = 0.5;
623
        //av->qblur = 0.5;
624

    
625
        //if (!av->nsse_weight)
626
        //    av->nsse_weight = 8;
627

    
628
        //av->frame_skip_cmp = FF_CMP_DCTMAX;
629
        //if (!av->me_method)
630
        //    av->me_method = ME_EPZS;
631
        //av->rc_buffer_aggressivity = 1.0;
632

    
633
        //if (!av->rc_eq)
634
        //    av->rc_eq = "tex^qComp";
635
        //if (!av->i_quant_factor)
636
        //    av->i_quant_factor = -0.8;
637
        //if (!av->b_quant_factor)
638
        //    av->b_quant_factor = 1.25;
639
        //if (!av->b_quant_offset)
640
        //    av->b_quant_offset = 1.25;
641
        if (!av->rc_max_rate)
642
            av->rc_max_rate = av->bit_rate * 2;
643

    
644
        if (av->rc_max_rate && !av->rc_buffer_size) {
645
            av->rc_buffer_size = av->rc_max_rate;
646
        }
647
        }
648
        //end of code taken from ffserver.c
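        /*
         * Worked example of the ffserver-derived defaults above (hypothetical values):
         * with bit_rate = 1000000 and time_base = 1/25,
         *   bit_rate_tolerance = FFMAX(1000000/4, 1000000*1/25) = FFMAX(250000, 40000) = 250000
         *   rc_max_rate        = 1000000 * 2 = 2000000
         *   rc_buffer_size     = rc_max_rate = 2000000
         */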
649

    
650
  switch (pCodecEnc->id) {
651
    case CODEC_ID_H264 :
652
        // Fast Profile
653
        // libx264-fast.ffpreset preset 
654
        pCodecCtxEnc->coder_type = FF_CODER_TYPE_AC; // coder = 1 -> enable CABAC
655
        pCodecCtxEnc->flags |= CODEC_FLAG_LOOP_FILTER; // flags=+loop -> deblock
656
        pCodecCtxEnc->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
657
        pCodecCtxEnc->partitions |= X264_PART_I8X8|X264_PART_I4X4|X264_PART_P8X8|X264_PART_B8X8;        // partitions=+parti8x8+parti4x4+partp8x8+partb8x8
658
        pCodecCtxEnc->me_method=ME_HEX; // me_method=hex
659
        pCodecCtxEnc->me_subpel_quality = 6; // subq=7
660
        pCodecCtxEnc->me_range = 16; // me_range=16
661
        //pCodecCtxEnc->gop_size = 250; // g=250
662
        //pCodecCtxEnc->keyint_min = 25; // keyint_min=25
663
        pCodecCtxEnc->scenechange_threshold = 40; // sc_threshold=40
664
        pCodecCtxEnc->i_quant_factor = 0.71; // i_qfactor=0.71
665
        pCodecCtxEnc->b_frame_strategy = 1; // b_strategy=1
666
        pCodecCtxEnc->qcompress = 0.6; // qcomp=0.6
667
        pCodecCtxEnc->qmin = 10; // qmin=10
668
        pCodecCtxEnc->qmax = 51; // qmax=51
669
        pCodecCtxEnc->max_qdiff = 4; // qdiff=4
670
        //pCodecCtxEnc->max_b_frames = 3; // bf=3
671
        pCodecCtxEnc->refs = 2; // refs=3
672
        //pCodecCtxEnc->directpred = 1; // directpred=1
673
        pCodecCtxEnc->directpred = 3; // directpred=1 in preset -> "directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)"
674
        //pCodecCtxEnc->trellis = 1; // trellis=1
675
        pCodecCtxEnc->flags2 |= CODEC_FLAG2_BPYRAMID|CODEC_FLAG2_MIXED_REFS|CODEC_FLAG2_WPRED|CODEC_FLAG2_8X8DCT|CODEC_FLAG2_FASTPSKIP;        // flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
676
        pCodecCtxEnc->weighted_p_pred = 2; // wpredp=2
677

    
678
        // libx264-main.ffpreset preset
679
        //pCodecCtxEnc->flags2|=CODEC_FLAG2_8X8DCT;
680
        //pCodecCtxEnc->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
681
        //pCodecCtxEnc->crf = 22;
682

    
683
#ifdef STREAMER_X264_USE_SSIM
684
        pCodecCtxEnc->flags2 |= CODEC_FLAG2_SSIM;
685
#endif
686

    
687
        //pCodecCtxEnc->weighted_p_pred=2; //maps wpredp=2; weighted prediction analysis method
688
        // pCodecCtxEnc->rc_min_rate = 0;
689
        // pCodecCtxEnc->rc_max_rate = video_bitrate*2;
690
        // pCodecCtxEnc->rc_buffer_size = 0;
691
        break;
692
    case CODEC_ID_MPEG4 :
693
        break;
694
    default:
695
        fprintf(stderr, "INIT: Unsupported OUT VIDEO codec: %s!\n", video_codec);
696
  }
697

    
698
  if ((av_set_options_string(pCodecCtxEnc, codec_options, "=", ",")) < 0) {
699
    fprintf(stderr, "Error parsing options string: '%s'\n", codec_options);
700
    exit(1);
701
  }
702

    
703
        fprintf(stderr, "INIT: VIDEO timebase OUT:%d %d IN: %d %d\n", pCodecCtxEnc->time_base.num, pCodecCtxEnc->time_base.den, pCodecCtx->time_base.num, pCodecCtx->time_base.den);
704
 }
705

    
706
        if(pCodec==NULL) {
707
                fprintf(stderr, "INIT: Unsupported IN VIDEO pcodec!\n");
708
                return -1; // Codec not found
709
        }
710
        if(!vcopy && pCodecEnc==NULL) {
711
                fprintf(stderr, "INIT: Unsupported OUT VIDEO pcodecenc!\n");
712
                return -1; // Codec not found
713
        }
714
        if(avcodec_open(pCodecCtx, pCodec)<0) {
715
                fprintf(stderr, "INIT: could not open IN VIDEO codec\n");
716
                return -1; // Could not open codec
717
        }
718
        if(!vcopy && avcodec_open(pCodecCtxEnc, pCodecEnc)<0) {
719
                fprintf(stderr, "INIT: could not open OUT VIDEO codecEnc\n");
720
                return -1; // Could not open codec
721
        }
722
        if(audioStream!=-1) {
723
                //setup audio output encoder
724
                aCodecCtxEnc = avcodec_alloc_context();
725
                aCodecCtxEnc->bit_rate = audio_bitrate; //256000
726
                aCodecCtxEnc->sample_fmt = SAMPLE_FMT_S16;
727
                aCodecCtxEnc->sample_rate = aCodecCtx->sample_rate;
728
                aCodecCtxEnc->channels = aCodecCtx->channels;
729
                fprintf(stderr, "INIT: AUDIO bitrate OUT:%d sample_rate:%d channels:%d\n", aCodecCtxEnc->bit_rate, aCodecCtxEnc->sample_rate, aCodecCtxEnc->channels);
730

    
731
                // Find the decoder for the audio stream
732
                aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
733
                aCodecEnc = avcodec_find_encoder_by_name(audio_codec);
734
                if(aCodec==NULL) {
735
                        fprintf(stderr,"INIT: Unsupported acodec!\n");
736
                        return -1;
737
                }
738
                if(aCodecEnc==NULL) {
739
                        fprintf(stderr,"INIT: Unsupported acodecEnc!\n");
740
                        return -1;
741
                }
742
        
743
                if(avcodec_open(aCodecCtx, aCodec)<0) {
744
                        fprintf(stderr, "INIT: could not open IN AUDIO codec\n");
745
                        return -1; // Could not open codec
746
                }
747
                if(avcodec_open(aCodecCtxEnc, aCodecEnc)<0) {
748
                        fprintf(stderr, "INIT: could not open OUT AUDIO codec\n");
749
                        return -1; // Could not open codec
750
                }
751
        }
752
        else {
753
                fprintf(stderr,"INIT: NO AUDIO TRACK IN INPUT FILE\n");
754
        }
755

    
756
        // Allocate audio in and out buffers
757
        samples = (int16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
758
        if(samples == NULL) {
759
                fprintf(stderr, "INIT: Memory error alloc audio samples!!!\n");
760
                return -1;
761
        }
762
        audio_outbuf_size = STREAMER_MAX_AUDIO_BUFFER_SIZE;
763
        audio_outbuf = av_malloc(audio_outbuf_size);
764
        if(audio_outbuf == NULL) {
765
                fprintf(stderr, "INIT: Memory error alloc audio_outbuf!!!\n");
766
                return -1;
767
        }
768

    
769
        // Allocate video in frame and out buffer
770
        pFrame1=avcodec_alloc_frame();
771
        if(pFrame1==NULL) {
772
                fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
773
                return -1;
774
        }
775
        video_outbuf_size = STREAMER_MAX_VIDEO_BUFFER_SIZE;
776
        video_outbuf = av_malloc(video_outbuf_size);
777

    
778
        //allocate Napa-Wine transport
779
        frame = (Frame *)malloc(sizeof(Frame));
780
        if(!frame) {
781
                fprintf(stderr, "INIT: Memory error alloc Frame!!!\n");
782
                return -1;
783
        }
784

    
785
        //create an empty first video chunk
786
        outstream.chunk = (ExternalChunk *)malloc(sizeof(ExternalChunk));
787
        if(!outstream.chunk) {
788
                fprintf(stderr, "INIT: Memory error alloc chunk!!!\n");
789
                return -1;
790
        }
791
        outstream.chunk->data = NULL;
792
        outstream.chunk->seq = 0;
793
        dcprintf(DEBUG_CHUNKER, "INIT: chunk video %d\n", outstream.chunk->seq);
794
        //create empty first audio chunk
795

    
796
        chunkaudio = (ExternalChunk *)malloc(sizeof(ExternalChunk));
797
        if(!chunkaudio) {
798
                fprintf(stderr, "INIT: Memory error alloc chunkaudio!!!\n");
799
                return -1;
800
        }
801
  chunkaudio->data=NULL;
802
        chunkaudio->seq = 0;
803
        //initChunk(chunkaudio, &seq_current_chunk);
804
        dcprintf(DEBUG_CHUNKER, "INIT: chunk audio %d\n", chunkaudio->seq);
805

    
806
#ifdef HTTPIO
807
        /* initialize the HTTP chunk pusher */
808
        initChunkPusher(); //TRIPLO
809
#endif
810

    
811
        long sleep=0;
812
        struct timeval now_tv;
813
        struct timeval tmp_tv;
814
        long long lateTime = 0;
815
        long long maxAudioInterval = 0;
816
        long long maxVDecodeTime = 0;
817
//        unsigned char lastIFrameDistance = 0;
818

    
819
#ifdef TCPIO
820
        static char peer_ip[16];
821
        static int peer_port;
822
        int res = sscanf(outside_world_url, "tcp://%15[0-9.]:%d", peer_ip, &peer_port);
823
        if (res < 2) {
824
                fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
825
                return -2;
826
        }
827
        
828
        outstream.output = initTCPPush(peer_ip, peer_port);
829
        if (!outstream.output) {
830
                fprintf(stderr, "Error initializing output module, exiting\n");
831
                exit(1);
832
        }
833
#endif
834
#ifdef UDPIO
835
        static char peer_ip[16];
836
        static int peer_port;
837
        int res = sscanf(outside_world_url, "udp://%15[0-9.]:%d", peer_ip, &peer_port);
838
        if (res < 2) {
839
                fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
840
                return -2;
841
        }
842
        
843
        initUDPPush(peer_ip, peer_port);
844
#endif
845
        
846
        char videotrace_filename[255];
847
        char psnr_filename[255];
848
        sprintf(videotrace_filename, "yuv_data/videotrace.log");
849
        sprintf(psnr_filename, "yuv_data/psnrtrace.log");
850
        FILE* videotrace = fopen(videotrace_filename, "w");
851
        FILE* psnrtrace = fopen(psnr_filename, "w");
852

    
853
#ifdef USE_AVFILTER
854
        //init AVFilter
855
        avfilter_register_all();
856
        init_filters(avfilter, pCodecCtx);
857
#endif
858

    
859
        //main loop to read from the input file
860
        while((av_read_frame(pFormatCtx, &packet)>=0) && !quit)
861
        {
862
                //detect if a strange number of anomalies is occurring
863
                if(ptsvideo1 < 0 || ptsvideo1 > packet.dts || ptsaudio1 < 0 || ptsaudio1 > packet.dts) {
864
                        pts_anomalies_counter++;
865
                        dcprintf(DEBUG_ANOMALIES, "READLOOP: pts BASE anomaly detected number %d\n", pts_anomalies_counter);
866
                        if(pts_anomaly_threshold >=0 && live_source) { //reset just in case of live source
867
                                if(pts_anomalies_counter > pts_anomaly_threshold) {
868
                                        dcprintf(DEBUG_ANOMALIES, "READLOOP: too many pts BASE anomalies. resetting pts base\n");
869
                                        av_free_packet(&packet);
870
                                        goto close;
871
                                }
872
                        }
873
                }
874

    
875
                //newTime_video and _audio are in usec
876
                //if video and audio stamps differ more than 5sec
877
                if( newTime_video - newTime_audio > 5000000 || newTime_video - newTime_audio < -5000000 ) {
878
                        newtime_anomalies_counter++;
879
                        dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME audio video differ anomaly detected number %d\n", newtime_anomalies_counter);
880
                }
881

    
882
                if(newtime_anomaly_threshold >=0 && newtime_anomalies_counter > newtime_anomaly_threshold) {
883
                        if(live_source) { //restart just in case of live source
884
                                dcprintf(DEBUG_ANOMALIES, "READLOOP: too many NEGATIVE TIMESTAMPS anomalies. Restarting.\n");
885
                                av_free_packet(&packet);
886
                                goto close;
887
                        }
888
                }
889

    
890
                // Is this a packet from the video stream?
891
                if(packet.stream_index==videoStream)
892
                {
893
                        if(!live_source)
894
                        {
895
                                if(audioStream != -1) { //take this "time bank" method into account only if we have audio track
896
                                        // lateTime < 0 means a positive time account that can be used to decode video frames
897
                                        // if (lateTime + maxVDecodeTime) >= 0 then we may have a negative time account after video transcoding
898
                                        // therefore, it's better to skip the frame
899
                                        if(timebank && (lateTime+maxVDecodeTime) >= 0)
900
                                        {
901
                                                dcprintf(DEBUG_ANOMALIES, "\n\n\t\t************************* SKIPPING VIDEO FRAME %ld ***********************************\n\n", sleep);
902
                                                av_free_packet(&packet);
903
                                                continue;
904
                                        }
905
                                }
906
                        }
907
                        
908
                        gettimeofday(&tmp_tv, NULL);
909
                        
910
                        //decode the video packet into a raw pFrame
911
                        if(avcodec_decode_video2(pCodecCtx, pFrame1, &frameFinished, &packet)>0)
912
                        {
913
                                AVFrame *pFrame;
914
                                pFrame = pFrame1;
915

    
916
                                // usleep(5000);
917
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOin pkt: dts %"PRId64" pts %"PRId64" pts-dts %"PRId64"\n", packet.dts, packet.pts, packet.pts-packet.dts );
918
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame->pkt_dts, pFrame->pkt_pts, pFrame->pts);
919
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame->pict_type, pFrame->key_frame ? " (key)" : "");
920
                                if(frameFinished)
921
                                { // frameFinished should always be true here; anything else is an error
922
                                
923
                                        frame->number = ++contFrameVideo;
924

    
925

    
926

    
927
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: finished frame %d dts %"PRId64" pts %"PRId64"\n", frame->number, packet.dts, packet.pts);
928
                                        if(frame->number==0) {
929
                                                if(packet.dts==AV_NOPTS_VALUE)
930
                                                {
931
                                                        //a Dts with a noPts value is troublesome case for delta calculation based on Dts
932
                                                        contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
933
                                                        av_free_packet(&packet);
934
                                                        continue;
935
                                                }
936
                                                last_pkt_dts = packet.dts;
937
                                                newTime = 0;
938
                                        }
939
                                        else {
940
                                                if(packet.dts!=AV_NOPTS_VALUE) {
941
                                                        delta_video = packet.dts-last_pkt_dts;
942
                                                        last_pkt_dts = packet.dts;
943
                                                }
944
                                                else if(delta_video==0)
945
                                                {
946
                                                        //a Dts with a noPts value is troublesome case for delta calculation based on Dts
947
                                                        contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
948
                                                        av_free_packet(&packet);
949
                                                        continue;
950
                                                }
951
                                        }
952
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: deltavideo : %d\n", (int)delta_video);
953

    
954
                                        if(vcopy) {
955
                                                video_frame_size = packet.size;
956
                                                if (video_frame_size > video_outbuf_size) {
957
                                                        fprintf(stderr, "VIDEO: error, outbuf too small, SKIPPING\n");;
958
                                                        av_free_packet(&packet);
959
                                                        continue;
960
                                                } else {
961
                                                        memcpy(video_outbuf, packet.data, video_frame_size);
962
                                                }
963

    
964
                                                if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
965
                                                        target_pts = pFrame->pkt_pts;
966
                                                }else {        //TODO: review this
967
                                                        target_pts = pFrame->pkt_dts;
968
                                                }
969
                                        } else {
970
                                                video_frame_size = transcodeFrame(video_outbuf, video_outbuf_size, &target_pts, pFrame, pFormatCtx->streams[videoStream]->time_base, pCodecCtx, pCodecCtxEnc);
971
                                                if (video_frame_size <= 0) {
972
                                                        av_free_packet(&packet);
973
                                                        contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
974
                                                        continue;
975
                                                }
976
                                        }
977

    
978
                                        if(offset_av)
979
                                        {
980
                                                if(FirstTimeVideo && target_pts>0) {
981
                                                        ptsvideo1 = target_pts;
982
                                                        FirstTimeVideo = 0;
983
                                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: SET PTS BASE OFFSET %"PRId64"\n", ptsvideo1);
984
                                                }
985
                                        }
986
                                        else //we want to compensate audio and video offset for this source
987
                                        {
988
                                                if(FirstTimeVideo && target_pts>0) {
989
                                                        //maintain the offset between audio pts and video pts
990
                                                        //because in case of live source they have the same numbering
991
                                                        if(ptsaudio1 > 0) //if we have already seen some audio frames...
992
                                                                ptsvideo1 = ptsaudio1;
993
                                                        else
994
                                                                ptsvideo1 = target_pts;
995
                                                        FirstTimeVideo = 0;
996
                                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO LIVE: SET PTS BASE OFFSET %"PRId64"\n", ptsvideo1);
997
                                                }
998
                                        }
999
                                        //compute the new video timestamp in milliseconds
1000
                                        if(frame->number>0) {
1001
                                                newTime = (target_pts - ptsvideo1) * 1000 * pFormatCtx->streams[videoStream]->time_base.num / pFormatCtx->streams[videoStream]->time_base.den;
1002
                                                // store timestamp in useconds for next frame sleep
1003
                                                newTime_video = newTime*1000;
1004
                                        }
1005
                                        dcprintf(DEBUG_TIMESTAMPING, "VIDEO: NEWTIMESTAMP %lld\n", newTime);
1006
                                        if(newTime<0) {
1007
                                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: SKIPPING FRAME\n");
1008
                                                newtime_anomalies_counter++;
1009
                                                dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME negative video timestamp anomaly detected number %d\n", newtime_anomalies_counter);
1010
                                                contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
1011
                                                av_free_packet(&packet);
1012
                                                continue; //SKIP THIS FRAME, bad timestamp
1013
                                        }
1014

    
1015
                                        createFrame(frame, newTime, video_frame_size, 
1016
                                                    vcopy ? pFrame->pict_type : (unsigned char)pCodecCtxEnc->coded_frame->pict_type);
1017
                                        addFrameToOutstream(&outstream, frame, video_outbuf);
1018

    
1019
                                        //compute how long it took to encode video frame
1020
                                        gettimeofday(&now_tv, NULL);
1021
                                        long long usec = (now_tv.tv_sec-tmp_tv.tv_sec)*1000000;
1022
                                        usec+=(now_tv.tv_usec-tmp_tv.tv_usec);
1023
                                        if(usec > maxVDecodeTime)
1024
                                                maxVDecodeTime = usec;
1025

    
1026
                                        //we DON'T have an audio track, so we compute timings and determine
1027
                                        //how much time we have to sleep at next VIDEO frame taking
1028
                                        //also into account how much time was needed to encode the current
1029
                                        //video frame
1030
                                        //all this in case the video source is not live, i.e. not self-timing
1031
                                        //and only in case there is no audio track
1032
					if(audioStream == -1) {
						if(!live_source) {
							if(newTime_prev != 0) {
								//how much delay between video frames ideally
								long long maxDelay = newTime_video - newTime_prev;
								sleep = (maxDelay - usec);
								dcprintf(DEBUG_ANOMALIES,"\tmaxDelay=%ld\n", ((long)maxDelay));
								dcprintf(DEBUG_ANOMALIES,"\tlast video frame interval=%ld; sleep time=%ld\n", ((long)usec), ((long)sleep));
							}
							else
								sleep = 0;

							//update and store counters
							newTime_prev = newTime_video;

							//i can also sleep now instead of at the beginning of
							//the next frame because in this case we only have video
							//frames, hence it would immediately be the next thing to do
							if(sleep > 0) {
								dcprintf(DEBUG_TIMESTAMPING,"\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
								usleep(sleep);
							}

						}
					}
				}
			}
		} else if(packet.stream_index==audioStream) {
			if(sleep > 0)
			{
				dcprintf(DEBUG_TIMESTAMPING, "\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
				usleep(sleep);
			}

			audio_data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			//decode the audio packet into a raw audio source buffer
			if(avcodec_decode_audio3(aCodecCtx, samples, &audio_data_size, &packet)>0)
			{
				dcprintf(DEBUG_AUDIO_FRAMES, "\n-------AUDIO FRAME\n");
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: newTimeaudioSTART : %lf\n", (double)(packet.pts)*av_q2d(pFormatCtx->streams[audioStream]->time_base));
				if(audio_data_size>0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: datasizeaudio:%d\n", audio_data_size);
					/* if a frame has been decoded, output it */
					//fwrite(samples, 1, audio_data_size, outfileaudio);
				}
				else {
					av_free_packet(&packet);
					continue;
				}

				audio_frame_size = avcodec_encode_audio(aCodecCtxEnc, audio_outbuf, audio_data_size, samples);
				if(audio_frame_size <= 0) {
					av_free_packet(&packet);
					continue;
				}

				frame->number = contFrameAudio;

				if(frame->number==0) {
					if(packet.dts==AV_NOPTS_VALUE) {
						av_free_packet(&packet);
						continue;
					}
					last_pkt_dts_audio = packet.dts;
					newTime = 0;
				}
				else {
					if(packet.dts!=AV_NOPTS_VALUE) {
						delta_audio = packet.dts-last_pkt_dts_audio;
						last_pkt_dts_audio = packet.dts;
					}
					else if(delta_audio==0) {
						av_free_packet(&packet);
						continue;
					}
				}
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: original codec frame number %d vs. encoded %d vs. packed %d\n", aCodecCtx->frame_number, aCodecCtxEnc->frame_number, frame->number);
				//use pts if dts is invalid
				if(packet.dts!=AV_NOPTS_VALUE)
					target_pts = packet.dts;
				else if(packet.pts!=AV_NOPTS_VALUE) {
					target_pts = packet.pts;
				} else {
					av_free_packet(&packet);
					continue;
				}

				if(offset_av)
				{
					if(FirstTimeAudio && packet.dts>0) {
						ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SET PTS BASE OFFSET %"PRId64"\n", ptsaudio1);
					}
				}
				else //we want to compensate audio and video offset for this source
				{
					if(FirstTimeAudio && packet.dts>0) {
						//maintain the offset between audio pts and video pts
						//because in case of live source they have the same numbering
						if(ptsvideo1 > 0) //if we have already seen some video frames...
							ptsaudio1 = ptsvideo1;
						else
							ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO LIVE: SET PTS BASE OFFSET %"PRId64"\n", ptsaudio1);
					}
				}
				//compute the new audio timestamps in milliseconds
				if(frame->number>0) {
					newTime = ((target_pts-ptsaudio1)*1000.0*((double)av_q2d(pFormatCtx->streams[audioStream]->time_base)));//*(double)delta_audio;
					// store timestamp in microseconds for the next frame sleep
					newTime_audio = newTime*1000;
				}
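				// Example of the conversion above, with illustrative values only: for a
				// stream time_base of 1/44100, an advance of target_pts-ptsaudio1 = 22050
				// ticks gives 22050*1000/44100 = 500 ms; newTime_audio keeps the same
				// instant in microseconds for the inter-frame sleep bookkeeping below.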
				dcprintf(DEBUG_TIMESTAMPING, "AUDIO: NEWTIMESTAMP %lld\n", newTime);
				if(newTime<0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SKIPPING FRAME\n");
					newtime_anomalies_counter++;
					dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME negative audio timestamp anomaly detected number %d\n", newtime_anomalies_counter);
					av_free_packet(&packet);
					continue; //SKIP THIS FRAME, bad timestamp
				}

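				// Note: newTime and delay_audio are in milliseconds, so as written
				// tv_sec receives whole seconds and tv_usec the leftover milliseconds
				// (not microseconds); the receiving side is assumed to follow the same
				// convention when it decodes the frame header.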
				frame->timestamp.tv_sec = (unsigned int)(newTime + delay_audio)/1000;
				frame->timestamp.tv_usec = (newTime + delay_audio)%1000;
				frame->size = audio_frame_size;
				frame->type = 5; // 5 is audio type
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: pts %"PRId64" duration %d timebase %d %d dts %"PRId64"\n", packet.pts, packet.duration, pFormatCtx->streams[audioStream]->time_base.num, pFormatCtx->streams[audioStream]->time_base.den, packet.dts);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: timestamp sec:%ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: deltaaudio %"PRId64"\n", delta_audio);
				contFrameAudio++;

				if(update_chunk(chunkaudio, frame, audio_outbuf) == -1) {
					fprintf(stderr, "AUDIO: unable to update chunk %d. Exiting.\n", chunkaudio->seq);
					exit(-1);
				}
				//set priority
				chunkaudio->priority = 1;

				if(chunkFilled(chunkaudio, AUDIO_CHUNK)) {
					// is chunk filled using current strategy?
					//SAVE ON FILE
					//saveChunkOnFile(chunkaudio);
					//Send the chunk to an external transport/player
					sendChunk(outstream.output, chunkaudio);
					dctprintf(DEBUG_CHUNKER, "AUDIO: just sent chunk audio %d\n", chunkaudio->seq);
					chunkaudio->seq = 0; //signal that we need an increase
					//initChunk(chunkaudio, &seq_current_chunk);
				}
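				// Setting seq to 0 marks the chunk as consumed: the next call to
				// update_chunk() re-initialises it through initChunk() with a fresh
				// sequence number (see update_chunk below).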

				//we have an audio track, so we compute timings and determine
				//how much time we have to sleep before the next audio frame, taking
				//into account also how much time was needed to encode the
				//video frames; all this only in case the video source is not live,
				//i.e. not self-timing
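				// Pacing summary: lateTime decreases by (ideal gap - measured gap) for
				// every audio frame, so it goes negative while processing runs ahead of
				// the source timeline; once that lead exceeds maxAudioInterval, the loop
				// sleeps the excess off before reading the next packet.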
				if(!live_source)
				{
					if(newTime_prev != 0)
					{
						long long maxDelay = newTime_audio - newTime_prev;

						gettimeofday(&now_tv, NULL);
						long long usec = (now_tv.tv_sec-lastAudioSent.tv_sec)*1000000;
						usec+=(now_tv.tv_usec-lastAudioSent.tv_usec);

						if(usec > maxAudioInterval)
							maxAudioInterval = usec;

						lateTime -= (maxDelay - usec);
						dcprintf(DEBUG_TIMESTAMPING,"\tmaxDelay=%ld, maxAudioInterval=%ld\n", ((long)maxDelay), ((long) maxAudioInterval));
						dcprintf(DEBUG_TIMESTAMPING,"\tlast audio frame interval=%ld; lateTime=%ld\n", ((long)usec), ((long)lateTime));

						if((lateTime+maxAudioInterval) < 0)
							sleep = (lateTime+maxAudioInterval)*-1;
						else
							sleep = 0;
					}
					else
						sleep = 0;

					newTime_prev = newTime_audio;
					gettimeofday(&lastAudioSent, NULL);
				}
			}
		}
		dcprintf(DEBUG_CHUNKER,"Free the packet that was allocated by av_read_frame\n");
		av_free_packet(&packet);
	}

	if(videotrace)
		fclose(videotrace);
	if(psnrtrace)
		fclose(psnrtrace);

close:
	if(outstream.chunk->seq != 0 && outstream.chunk->frames_num>0) {
		//SAVE ON FILE
		//saveChunkOnFile(chunk);
		//Send the chunk to an external transport/player
		sendChunk(outstream.output, outstream.chunk);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST VIDEO CHUNK\n");
		outstream.chunk->seq = 0; //signal that we need an increase just in case we will restart
	}
	if(chunkaudio->seq != 0 && chunkaudio->frames_num>0) {
		//SAVE ON FILE
		//saveChunkOnFile(chunkaudio);
		//Send the chunk via http to an external transport/player
		sendChunk(outstream.output, chunkaudio);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST AUDIO CHUNK\n");
		chunkaudio->seq = 0; //signal that we need an increase just in case we will restart
	}

#ifdef HTTPIO
	/* finalize the HTTP chunk pusher */
	finalizeChunkPusher();
#endif

	free(outstream.chunk);
	free(chunkaudio);
	free(frame);
	av_free(video_outbuf);
	av_free(audio_outbuf);
	free(cmeta);

	// Free the YUV frame
	av_free(pFrame1);
	av_free(samples);

	// Close the codecs
	if (!vcopy) avcodec_close(pCodecCtx);
	if (!vcopy) avcodec_close(pCodecCtxEnc);

	if(audioStream!=-1) {
		avcodec_close(aCodecCtx);
		avcodec_close(aCodecCtxEnc);
	}

	// Close the video file
	av_close_input_file(pFormatCtx);

	if(LOOP_MODE) {
		//we want the video to continue, but av_read_frame stopped:
		//let's wait 5 seconds, then cycle in again
		usleep(5000000);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: WAITING 5 secs FOR LIVE SOURCE TO SKIP ERRORS AND RESTARTING\n");
		videoStream = -1;
		audioStream = -1;
		FirstTimeAudio=1;
		FirstTimeVideo=1;
		pts_anomalies_counter=0;
		newtime_anomalies_counter=0;
		newTime=0;
		newTime_audio=0;
		newTime_prev=0;
		ptsvideo1=0;
		ptsaudio1=0;
		last_pkt_dts=0;
		delta_video=0;
		delta_audio=0;
		last_pkt_dts_audio=0;
		target_pts=0;
		i=0;
		//~ contFrameVideo = 0;
		//~ contFrameAudio = 1;

#ifdef YUV_RECORD_ENABLED
		if(ChunkerStreamerTestMode)
		{
			video_record_count++;
			//~ savedVideoFrames = 0;

			//~ char tmp_filename[255];
			//~ sprintf(tmp_filename, "yuv_data/out_%d.yuv", video_record_count);
			//~ FILE *pFile=fopen(tmp_filename, "w");
			//~ if(pFile!=NULL)
				//~ fclose(pFile);
		}
#endif

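		// Jumping back to the restart label (defined earlier in this function)
		// presumably re-opens the input source and re-enters the read loop with
		// the counters reset above.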
		goto restart;
	}

#ifdef TCPIO
	finalizeTCPChunkPusher(outstream.output);
#endif

#ifdef USE_AVFILTER
	close_filters();
#endif

	return 0;
}

int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf) {
	//the Frame header (see frame.h) gets encoded into 5 slots of 32 bits
	//(3 ints plus 2 more for the timeval struct)
	static int sizeFrameHeader = 5*sizeof(int32_t);

	//moving temp pointer to encode the Frame on the wire
	uint8_t *tempdata = NULL;

	if(chunk->seq == 0) {
		initChunk(chunk, &seq_current_chunk);
	}
	//add frame priority to chunk priority (to be normalized later on)
	chunk->priority += frame->type + 1; // I:2, P:3, B:4

	//HINT on malloc
	chunk->data = (uint8_t *)realloc(chunk->data, sizeof(uint8_t)*(chunk->payload_len + frame->size + sizeFrameHeader));
	if(!chunk->data) {
		fprintf(stderr, "Memory error in chunk!!!\n");
		return -1;
	}
	chunk->frames_num++; // number of frames in the current chunk

/*
	//package the Frame header
	tempdata = chunk->data+chunk->payload_len;
	*((int32_t *)tempdata) = frame->number;
	tempdata+=sizeof(int32_t);
	*((struct timeval *)tempdata) = frame->timestamp;
	tempdata+=sizeof(struct timeval);
	*((int32_t *)tempdata) = frame->size;
	tempdata+=sizeof(int32_t);
	*((int32_t *)tempdata) = frame->type;
	tempdata+=sizeof(int32_t);
*/
	//package the Frame header: network order and platform independent
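	// Resulting on-wire layout (one CHUNK_TRANSCODING_INT_SIZE slot each, as
	// pushed below): [frame number][timestamp.tv_sec][timestamp.tv_usec][size][type]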
	tempdata = chunk->data+chunk->payload_len;
	bit32_encoded_push(frame->number, tempdata);
	bit32_encoded_push(frame->timestamp.tv_sec, tempdata + CHUNK_TRANSCODING_INT_SIZE);
	bit32_encoded_push(frame->timestamp.tv_usec, tempdata + CHUNK_TRANSCODING_INT_SIZE*2);
	bit32_encoded_push(frame->size, tempdata + CHUNK_TRANSCODING_INT_SIZE*3);
	bit32_encoded_push(frame->type, tempdata + CHUNK_TRANSCODING_INT_SIZE*4);

	//insert the new frame data
	memcpy(chunk->data + chunk->payload_len + sizeFrameHeader, outbuf, frame->size);
	chunk->payload_len += frame->size + sizeFrameHeader; // update payload length
	//chunk length is updated just prior to pushing it out because
	//the chunk header len is better calculated there
	//chunk->len = sizeChunkHeader + chunk->payload_len; // update overall length

	//update timestamps
	if(((int)frame->timestamp.tv_sec < (int)chunk->start_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->start_time.tv_sec && (int)frame->timestamp.tv_usec < (int)chunk->start_time.tv_usec) || (int)chunk->start_time.tv_sec==-1) {
		chunk->start_time.tv_sec = frame->timestamp.tv_sec;
		chunk->start_time.tv_usec = frame->timestamp.tv_usec;
	}

	if(((int)frame->timestamp.tv_sec > (int)chunk->end_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->end_time.tv_sec && (int)frame->timestamp.tv_usec > (int)chunk->end_time.tv_usec) || (int)chunk->end_time.tv_sec==-1) {
		chunk->end_time.tv_sec = frame->timestamp.tv_sec;
		chunk->end_time.tv_usec = frame->timestamp.tv_usec;
	}
	return 0;
}
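
/*
 * For reference: bit32_encoded_push() is declared near the top of this file and
 * implemented elsewhere in the tree. The sketch below is deliberately given a
 * different name and compiled out, so it cannot clash with the real symbol; it
 * only illustrates the big-endian byte order such a "network order" push is
 * assumed to use.
 */
#if 0
static void example_bit32_encoded_push(uint32_t v, uint8_t *p)
{
	p[0] = (uint8_t)(v >> 24); /* most significant byte first (network order) */
	p[1] = (uint8_t)(v >> 16);
	p[2] = (uint8_t)(v >> 8);
	p[3] = (uint8_t)(v);
}
#endif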

void SaveFrame(AVFrame *pFrame, int width, int height)
{
	FILE *pFile;
	int  y;

	// Open file
	char tmp_filename[255];
	sprintf(tmp_filename, "yuv_data/streamer_out.yuv");
	pFile=fopen(tmp_filename, "ab");
	if(pFile==NULL)
		return;

	// Write header
	//fprintf(pFile, "P5\n%d %d\n255\n", width, height);

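	// Assumes pFrame holds planar YUV 4:2:0 data (e.g. PIX_FMT_YUV420P): a full
	// resolution Y plane followed by half-resolution U and V planes, so every
	// appended frame adds width*height*3/2 bytes to the raw .yuv file.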
	// Write Y data
	for(y=0; y<height; y++)
		if(fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width, pFile) != width)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write U data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[1]+y*pFrame->linesize[1], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write V data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[2]+y*pFrame->linesize[2], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}

	// Close file
	fclose(pFile);
}

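/*
 * Debug dump of one encoded frame. Note that it writes the in-memory Frame
 * struct as-is, so the file layout depends on the host's struct padding and
 * endianness, unlike the portable per-frame header built in update_chunk();
 * it is apparently meant only for local inspection of the encoder output.
 */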
void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf)
{
	static FILE* pFile = NULL;

	pFile=fopen("yuv_data/streamer_out.mpeg4", "ab");
	if(pFile==NULL)
		return;
	fwrite(frame, sizeof(Frame), 1, pFile);
	fwrite(video_outbuf, frame->size, 1, pFile);
	fclose(pFile);
}