chunker-player / chunker_streamer / chunker_streamer.c @ 51d67a10
/*
 *  Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
 *  developed for the Napa-Wine EU project. See www.napa-wine.eu
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include "chunker_streamer.h"
#include <signal.h>
#include <math.h>
#include <getopt.h>
#include <libswscale/swscale.h>

#ifdef USE_AVFILTER
#include "chunker_filtering.h"
#endif

#define DEBUG
#define DEBUG_AUDIO_FRAMES  false
#define DEBUG_VIDEO_FRAMES  false
#define DEBUG_CHUNKER false
#define DEBUG_ANOMALIES true
#define DEBUG_TIMESTAMPING false
#include "dbg.h"

#define STREAMER_MAX(a,b) ((a>b)?(a):(b))
#define STREAMER_MIN(a,b) ((a<b)?(a):(b))

//#define DISPLAY_PSNR
#define GET_PSNR(x) ((x==0) ? 0 : (-10.0*log(x)/log(10)))
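// GET_PSNR maps a normalized mean squared error x (0..1) into PSNR in dB, i.e. -10*log10(x);
// below it is fed coded_frame->error[0] divided by width*height*255^2 when CODEC_FLAG_PSNR is set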

ChunkerMetadata *cmeta = NULL;
int seq_current_chunk = 1; //chunk numbering starts from 1; HINT do i need more bytes?

#define AUDIO_CHUNK 0
#define VIDEO_CHUNK 1

void SaveFrame(AVFrame *pFrame, int width, int height);
void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf);
int pushChunkTcp(ExternalChunk *echunk);
void initTCPPush(char* ip, int port);
int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf);
void finalizeTCPChunkPusher();
void bit32_encoded_push(uint32_t v, uint8_t *p);

int video_record_count = 0;
int savedVideoFrames = 0;
long int firstSavedVideoFrame = 0;
int ChunkerStreamerTestMode = 0;

int pts_anomaly_threshold = -1;
int newtime_anomaly_threshold = -1;
bool timebank = false;
char *outside_world_url = NULL;

int gop_size = 25;
int max_b_frames = 3;
bool vcopy = false;

long delay_audio = 0; //delay audio by x millisec

char *avfilter="yadif";

// Constant number of frames per chunk
int chunkFilledFramesStrategy(ExternalChunk *echunk, int chunkType)
{
	dcprintf(DEBUG_CHUNKER, "CHUNKER: check if frames num %d == %d in chunk %d\n", echunk->frames_num, cmeta->framesPerChunk[chunkType], echunk->seq);
	if(echunk->frames_num == cmeta->framesPerChunk[chunkType])
		return 1;

	return 0;
}

// Constant size. Note that for now each chunk will have a size just greater than or equal to the required value,
// so it can be considered constant size.
int chunkFilledSizeStrategy(ExternalChunk *echunk, int chunkType)
{
	dcprintf(DEBUG_CHUNKER, "CHUNKER: check if chunk size %d >= %d in chunk %d\n", echunk->payload_len, cmeta->targetChunkSize, echunk->seq);
	if(echunk->payload_len >= cmeta->targetChunkSize)
		return 1;

	return 0;
}

// Performance optimization.
// The chunkFilled function has been split into two functions (one for each strategy).
// Instead of continuously checking the strategy flag (which is constant),
// we change the callback just once according to the current strategy (see the switch statement in main where this function pointer is set)
int (*chunkFilled)(ExternalChunk *echunk, int chunkType);
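// chunkFilled is bound in main() right after chunkerInit(): cmeta->strategy == 1 selects
// chunkFilledSizeStrategy, any other value selects chunkFilledFramesStrategy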

void initChunk(ExternalChunk *chunk, int *seq_num) {
	chunk->seq = (*seq_num)++;
	chunk->frames_num = 0;
	chunk->payload_len = 0;
	chunk->len = 0;
	if(chunk->data != NULL)
		free(chunk->data);
	chunk->data = NULL;
	chunk->start_time.tv_sec = -1;
	chunk->start_time.tv_usec = -1;
	chunk->end_time.tv_sec = -1;
	chunk->end_time.tv_usec = -1;
	chunk->priority = 0;
	chunk->category = 0;
	chunk->_refcnt = 0;
}

int quit = 0;

void sigproc()
{
	printf("you have pressed ctrl-c, terminating...\n");
	quit = 1;
}

static void print_usage(int argc, char *argv[])
{
  fprintf (stderr,
    "\nUsage:%s [options]\n"
    "\n"
    "Mandatory options:\n"
    "\t[-i input file]\n"
    "\t[-a audio bitrate]\n"
    "\t[-v video bitrate]\n\n"
    "Other options:\n"
    "\t[-F output] (overrides config file)\n"
    "\t[-A audioencoder]\n"
    "\t[-V videoencoder]\n"
    "\t[-s WxH]: force video size.\n"
    "\t[-l]: this is a live stream.\n"
    "\t[-o]: adjust A/V frame timestamps (default off, use it only with flawed containers)\n"
    "\t[-p]: pts anomaly threshold (default: -1=off).\n"
    "\t[-q]: sync anomaly threshold (default: -1=off).\n"
    "\t[-t]: QoE test mode\n\n"

    "\t[--avfilter]: set input filter (default: yadif)\n"
    "\n"
    "Codec options:\n"
    "\t[-g GOP]: gop size\n"
    "\t[-b frames]: max number of consecutive b frames\n"
    "\t[-x extras]: extra video codec options (e.g. -x me_method=hex,flags2=+dct8x8+wpred+bpyramid+mixed_refs)\n"
    "\n"
    "=======================================================\n", argv[0]
    );
  }

int sendChunk(ExternalChunk *chunk) {
#ifdef HTTPIO
	return pushChunkHttp(chunk, outside_world_url);
#endif
#ifdef TCPIO
	return pushChunkTcp(chunk);
#endif
#ifdef UDPIO
	return pushChunkUDP(chunk);
#endif
}
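// Note: exactly one of HTTPIO, TCPIO or UDPIO is expected to be defined at build time;
// if none is defined, sendChunk() falls off the end without returning a value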

int main(int argc, char *argv[]) {
	signal(SIGINT, sigproc);

	int i=0;

	//output variables
	uint8_t *video_outbuf = NULL;
	int video_outbuf_size, video_frame_size;
	uint8_t *audio_outbuf = NULL;
	int audio_outbuf_size, audio_frame_size;
	int audio_data_size;

	//numeric identifiers of input streams
	int videoStream = -1;
	int audioStream = -1;

//	int len1;
	int frameFinished;
	//frame sequential counters
	int contFrameAudio=1, contFrameVideo=0;
//	int numBytes;

	//command line parameters
	int audio_bitrate = -1;
	int video_bitrate = -1;
	char *audio_codec = "mp2";
	char *video_codec = "mpeg4";
	char *codec_options = "";
	int live_source = 0; //tells to sleep before reading next frame if not live (i.e. file)
	int offset_av = 0; //tells to compensate for offset between audio and video in the file

	//a raw buffer for decoded uncompressed audio samples
	int16_t *samples = NULL;
	//a raw uncompressed video picture
	AVFrame *pFrame1 = NULL;
	AVFrame *pFrame2 = NULL;
	AVFrame *scaledFrame = NULL;

	AVFormatContext *pFormatCtx;
	AVCodecContext  *pCodecCtx = NULL ,*pCodecCtxEnc = NULL ,*aCodecCtxEnc = NULL ,*aCodecCtx = NULL;
	AVCodec         *pCodec = NULL ,*pCodecEnc = NULL ,*aCodec = NULL ,*aCodecEnc = NULL;
	AVPacket         packet;

	//stuff needed to compute the right timestamps
	short int FirstTimeAudio=1, FirstTimeVideo=1;
	short int pts_anomalies_counter=0;
	short int newtime_anomalies_counter=0;
	long long newTime=0, newTime_audio=0, newTime_video=0, newTime_prev=0;
	struct timeval lastAudioSent = {0, 0};
	int64_t ptsvideo1=0;
	int64_t ptsaudio1=0;
	int64_t last_pkt_dts=0, delta_video=0, delta_audio=0, last_pkt_dts_audio=0, target_pts=0;

	//Napa-Wine specific Frame and Chunk structures for transport
	Frame *frame = NULL;
	ExternalChunk *chunk = NULL;
	ExternalChunk *chunkaudio = NULL;

	char av_input[1024];
	int dest_width = -1;
	int dest_height = -1;

	static struct option long_options[] =
	{
		/* These options set a flag. */
		{"audio_stream", required_argument, 0, 0},
		{"video_stream", required_argument, 0, 0},
		{"avfilter", required_argument, 0, 0},
		{0, 0, 0, 0}
	};
	/* `getopt_long' stores the option index here. */
	int option_index = 0, c;
	int mandatories = 0;
	while ((c = getopt_long (argc, argv, "i:a:v:A:V:s:lop:q:tF:g:b:d:x:", long_options, &option_index)) != -1)
	{
		switch (c) {
			case 0: //for long options
				if( strcmp( "audio_stream", long_options[option_index].name ) == 0 ) { audioStream = atoi(optarg); }
				if( strcmp( "video_stream", long_options[option_index].name ) == 0 ) { videoStream = atoi(optarg); }
				if( strcmp( "avfilter", long_options[option_index].name ) == 0 ) { avfilter = strdup(optarg); }
				break;
			case 'i':
				sprintf(av_input, "%s", optarg);
				mandatories++;
				break;
			case 'a':
				sscanf(optarg, "%d", &audio_bitrate);
				mandatories++;
				break;
			case 'v':
				sscanf(optarg, "%d", &video_bitrate);
				mandatories++;
				break;
			case 'A':
				audio_codec = strdup(optarg);
				break;
			case 'V':
				video_codec = strdup(optarg);
				break;
			case 's':
				sscanf(optarg, "%dx%d", &dest_width, &dest_height);
				break;
			case 'l':
				live_source = 1;
				break;
			case 'o':
				offset_av = 1;
				break;
			case 't':
				ChunkerStreamerTestMode = 1;
				break;
			case 'p':
				sscanf(optarg, "%d", &pts_anomaly_threshold);
				break;
			case 'q':
				sscanf(optarg, "%d", &newtime_anomaly_threshold);
				break;
			case 'F':
				outside_world_url = strdup(optarg);
				break;
			case 'g':
				sscanf(optarg, "%d", &gop_size);
				break;
			case 'b':
				sscanf(optarg, "%d", &max_b_frames);
				break;
			case 'd':
				sscanf(optarg, "%ld", &delay_audio);
				break;
			case 'x':
				codec_options = strdup(optarg);
				break;
			default:
				print_usage(argc, argv);
				return -1;
		}
	}

	if(mandatories < 3)
	{
		print_usage(argc, argv);
		return -1;
	}
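
	// -i, -a and -v are the three options counted in 'mandatories'; an illustrative
	// invocation (binary name and parameter values here are only examples) could be:
	//   chunker_streamer -i input.avi -a 64000 -v 500000 -F tcp://127.0.0.1:6666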

#ifdef YUV_RECORD_ENABLED
	if(ChunkerStreamerTestMode)
	{
		DELETE_DIR("yuv_data");
		CREATE_DIR("yuv_data");
		//FILE* pFile=fopen("yuv_data/streamer_out.yuv", "w");
		//fclose(pFile);
	}
#endif

restart:
	// read the configuration file
	cmeta = chunkerInit();
	if (!outside_world_url) {
		outside_world_url = strdup(cmeta->outside_world_url);
	}
	switch(cmeta->strategy)
	{
		case 1:
			chunkFilled = chunkFilledSizeStrategy;
			break;
		default:
			chunkFilled = chunkFilledFramesStrategy;
	}

	if(live_source)
		fprintf(stderr, "INIT: Using LIVE SOURCE TimeStamps\n");
	if(offset_av)
		fprintf(stderr, "INIT: Compensating AV OFFSET in file\n");

	// Register all formats and codecs
	av_register_all();

	// Open input file
	if(av_open_input_file(&pFormatCtx, av_input, NULL, 0, NULL) != 0) {
		fprintf(stdout, "INIT: Couldn't open video file. Exiting.\n");
		exit(-1);
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx) < 0) {
		fprintf(stdout, "INIT: Couldn't find stream information. Exiting.\n");
		exit(-1);
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, av_input, 0);

	// Find the video and audio stream numbers
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream<0) {
			videoStream=i;
		}
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream<0) {
			audioStream=i;
		}
	}

	if(videoStream==-1 || audioStream==-1) {	// TODO: refine to work with 1 or the other
		fprintf(stdout, "INIT: Didn't find audio and video streams. Exiting.\n");
		exit(-1);
	}

	fprintf(stderr, "INIT: Num streams : %d TBR: %d %d RFRAMERATE:%d %d Duration:%ld\n", pFormatCtx->nb_streams, pFormatCtx->streams[videoStream]->time_base.num, pFormatCtx->streams[videoStream]->time_base.den, pFormatCtx->streams[videoStream]->r_frame_rate.num, pFormatCtx->streams[videoStream]->r_frame_rate.den, (long int)pFormatCtx->streams[videoStream]->duration);

	fprintf(stderr, "INIT: Video stream has id : %d\n",videoStream);
	fprintf(stderr, "INIT: Audio stream has id : %d\n",audioStream);


	// Get a pointer to the codec context for the input video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	//extract W and H
	fprintf(stderr, "INIT: Width:%d Height:%d\n", pCodecCtx->width, pCodecCtx->height);

	// Get a pointer to the codec context for the input audio stream
	if(audioStream != -1) {
		aCodecCtx=pFormatCtx->streams[audioStream]->codec;
		fprintf(stderr, "INIT: AUDIO Codecid: %d channels %d samplerate %d\n", aCodecCtx->codec_id, aCodecCtx->channels, aCodecCtx->sample_rate);
	}

	// Figure out size
	dest_width = (dest_width > 0) ? dest_width : pCodecCtx->width;
	dest_height = (dest_height > 0) ? dest_height : pCodecCtx->height;

	//setup video output encoder
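	// the special encoder name "copy" enables passthrough (vcopy): encoded video packets
	// are later forwarded as-is instead of being re-encoded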
 if (strcmp(video_codec, "copy") == 0) {
	vcopy = true;
 } else {
	pCodecEnc = avcodec_find_encoder_by_name(video_codec);
	if (pCodecEnc) {
		fprintf(stderr, "INIT: Setting VIDEO codecID to: %d\n",pCodecEnc->id);
	} else {
		fprintf(stderr, "INIT: Unknown OUT VIDEO codec: %s!\n", video_codec);
		return -1; // Codec not found
	}

	pCodecCtxEnc=avcodec_alloc_context();
	pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;
	pCodecCtxEnc->codec_id = pCodecEnc->id;

	pCodecCtxEnc->bit_rate = video_bitrate;
	//~ pCodecCtxEnc->qmin = 30;
	//~ pCodecCtxEnc->qmax = 30;
	//times 20 follows the defaults, was not needed in previous versions of libavcodec
//	pCodecCtxEnc->crf = 20.0f;
	// resolution must be a multiple of two
	pCodecCtxEnc->width = dest_width;
	pCodecCtxEnc->height = dest_height;
	// frames per second
	//~ pCodecCtxEnc->time_base= pCodecCtx->time_base;//(AVRational){1,25};
	//printf("pCodecCtx->time_base=%d/%d\n", pCodecCtx->time_base.num, pCodecCtx->time_base.den);
	pCodecCtxEnc->time_base= pCodecCtx->time_base;//(AVRational){1,25};
	pCodecCtxEnc->gop_size = gop_size; // emit one intra frame every gop_size frames
	pCodecCtxEnc->max_b_frames = max_b_frames;
	pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P;
	pCodecCtxEnc->flags |= CODEC_FLAG_PSNR;
	//~ pCodecCtxEnc->flags |= CODEC_FLAG_QSCALE;

	//some generic quality tuning
	pCodecCtxEnc->mb_decision = FF_MB_DECISION_RD;

	//some rate control parameters for streaming, taken from ffserver.c
	{
	/* Bitrate tolerance is less for streaming */
	AVCodecContext *av = pCodecCtxEnc;
	if (av->bit_rate_tolerance == 0)
	    av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
	              (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
	//if (av->qmin == 0)
	//    av->qmin = 3;
	//if (av->qmax == 0)
	//    av->qmax = 31;
	//if (av->max_qdiff == 0)
	//    av->max_qdiff = 3;
	//av->qcompress = 0.5;
	//av->qblur = 0.5;

	//if (!av->nsse_weight)
	//    av->nsse_weight = 8;

	//av->frame_skip_cmp = FF_CMP_DCTMAX;
	//if (!av->me_method)
	//    av->me_method = ME_EPZS;
	//av->rc_buffer_aggressivity = 1.0;

	//if (!av->rc_eq)
	//    av->rc_eq = "tex^qComp";
	//if (!av->i_quant_factor)
	//    av->i_quant_factor = -0.8;
	//if (!av->b_quant_factor)
	//    av->b_quant_factor = 1.25;
	//if (!av->b_quant_offset)
	//    av->b_quant_offset = 1.25;
	if (!av->rc_max_rate)
	    av->rc_max_rate = av->bit_rate * 2;

	if (av->rc_max_rate && !av->rc_buffer_size) {
	    av->rc_buffer_size = av->rc_max_rate;
	}
	}
	//end of code taken from ffserver.c

  switch (pCodecEnc->id) {
    case CODEC_ID_H264 :
	// Fast Profile
	// libx264-fast.ffpreset preset
	pCodecCtxEnc->coder_type = FF_CODER_TYPE_AC; // coder = 1 -> enable CABAC
	pCodecCtxEnc->flags |= CODEC_FLAG_LOOP_FILTER; // flags=+loop -> deblock
	pCodecCtxEnc->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
	pCodecCtxEnc->partitions |= X264_PART_I8X8|X264_PART_I4X4|X264_PART_P8X8|X264_PART_B8X8;	// partitions=+parti8x8+parti4x4+partp8x8+partb8x8
	pCodecCtxEnc->me_method=ME_HEX; // me_method=hex
	pCodecCtxEnc->me_subpel_quality = 6; // subq=7
	pCodecCtxEnc->me_range = 16; // me_range=16
	//pCodecCtxEnc->gop_size = 250; // g=250
	//pCodecCtxEnc->keyint_min = 25; // keyint_min=25
	pCodecCtxEnc->scenechange_threshold = 40; // sc_threshold=40
	pCodecCtxEnc->i_quant_factor = 0.71; // i_qfactor=0.71
	pCodecCtxEnc->b_frame_strategy = 1; // b_strategy=1
	pCodecCtxEnc->qcompress = 0.6; // qcomp=0.6
	pCodecCtxEnc->qmin = 10; // qmin=10
	pCodecCtxEnc->qmax = 51; // qmax=51
	pCodecCtxEnc->max_qdiff = 4; // qdiff=4
	//pCodecCtxEnc->max_b_frames = 3; // bf=3
	pCodecCtxEnc->refs = 2; // refs=3
	//pCodecCtxEnc->directpred = 1; // directpred=1
	pCodecCtxEnc->directpred = 3; // directpred=1 in preset -> "directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)"
	//pCodecCtxEnc->trellis = 1; // trellis=1
	pCodecCtxEnc->flags2 |= CODEC_FLAG2_BPYRAMID|CODEC_FLAG2_MIXED_REFS|CODEC_FLAG2_WPRED|CODEC_FLAG2_8X8DCT|CODEC_FLAG2_FASTPSKIP;	// flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
	pCodecCtxEnc->weighted_p_pred = 2; // wpredp=2

	// libx264-main.ffpreset preset
	//pCodecCtxEnc->flags2|=CODEC_FLAG2_8X8DCT;
	//pCodecCtxEnc->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
	//pCodecCtxEnc->crf = 22;

#ifdef STREAMER_X264_USE_SSIM
	pCodecCtxEnc->flags2 |= CODEC_FLAG2_SSIM;
#endif

	//pCodecCtxEnc->weighted_p_pred=2; //maps wpredp=2; weighted prediction analysis method
	// pCodecCtxEnc->rc_min_rate = 0;
	// pCodecCtxEnc->rc_max_rate = video_bitrate*2;
	// pCodecCtxEnc->rc_buffer_size = 0;
	break;
    case CODEC_ID_MPEG4 :
	break;
    default:
	fprintf(stderr, "INIT: Unsupported OUT VIDEO codec: %s!\n", video_codec);
  }

  if ((av_set_options_string(pCodecCtxEnc, codec_options, "=", ",")) < 0) {
    fprintf(stderr, "Error parsing options string: '%s'\n", codec_options);
    exit(1);
  }

	fprintf(stderr, "INIT: VIDEO timebase OUT:%d %d IN: %d %d\n", pCodecCtxEnc->time_base.num, pCodecCtxEnc->time_base.den, pCodecCtx->time_base.num, pCodecCtx->time_base.den);
 }

	if(pCodec==NULL) {
		fprintf(stderr, "INIT: Unsupported IN VIDEO pcodec!\n");
		return -1; // Codec not found
	}
	if(!vcopy && pCodecEnc==NULL) {
		fprintf(stderr, "INIT: Unsupported OUT VIDEO pcodecenc!\n");
		return -1; // Codec not found
	}
	if(avcodec_open(pCodecCtx, pCodec)<0) {
		fprintf(stderr, "INIT: could not open IN VIDEO codec\n");
		return -1; // Could not open codec
	}
	if(!vcopy && avcodec_open(pCodecCtxEnc, pCodecEnc)<0) {
		fprintf(stderr, "INIT: could not open OUT VIDEO codecEnc\n");
		return -1; // Could not open codec
	}
	if(audioStream!=-1) {
		//setup audio output encoder
		aCodecCtxEnc = avcodec_alloc_context();
		aCodecCtxEnc->bit_rate = audio_bitrate; //256000
		aCodecCtxEnc->sample_fmt = SAMPLE_FMT_S16;
		aCodecCtxEnc->sample_rate = aCodecCtx->sample_rate;
		aCodecCtxEnc->channels = aCodecCtx->channels;
		fprintf(stderr, "INIT: AUDIO bitrate OUT:%d sample_rate:%d channels:%d\n", aCodecCtxEnc->bit_rate, aCodecCtxEnc->sample_rate, aCodecCtxEnc->channels);

		// Find the decoder for the audio stream
		aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
		aCodecEnc = avcodec_find_encoder_by_name(audio_codec);
		if(aCodec==NULL) {
			fprintf(stderr,"INIT: Unsupported acodec!\n");
			return -1;
		}
		if(aCodecEnc==NULL) {
			fprintf(stderr,"INIT: Unsupported acodecEnc!\n");
			return -1;
		}

		if(avcodec_open(aCodecCtx, aCodec)<0) {
			fprintf(stderr, "INIT: could not open IN AUDIO codec\n");
			return -1; // Could not open codec
		}
		if(avcodec_open(aCodecCtxEnc, aCodecEnc)<0) {
			fprintf(stderr, "INIT: could not open OUT AUDIO codec\n");
			return -1; // Could not open codec
		}
	}
	else {
		fprintf(stderr,"INIT: NO AUDIO TRACK IN INPUT FILE\n");
	}

	// Allocate audio in and out buffers
	samples = (int16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if(samples == NULL) {
		fprintf(stderr, "INIT: Memory error alloc audio samples!!!\n");
		return -1;
	}
	audio_outbuf_size = STREAMER_MAX_AUDIO_BUFFER_SIZE;
	audio_outbuf = av_malloc(audio_outbuf_size);
	if(audio_outbuf == NULL) {
		fprintf(stderr, "INIT: Memory error alloc audio_outbuf!!!\n");
		return -1;
	}

	// Allocate video in frame and out buffer
	pFrame1=avcodec_alloc_frame();
	pFrame2=avcodec_alloc_frame();
	scaledFrame=avcodec_alloc_frame();
	if(pFrame1==NULL || pFrame2==NULL || scaledFrame == NULL) {
		fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
		return -1;
	}
	video_outbuf_size = STREAMER_MAX_VIDEO_BUFFER_SIZE;
	video_outbuf = av_malloc(video_outbuf_size);
	int scaledFrame_buf_size = avpicture_get_size( PIX_FMT_YUV420P, dest_width, dest_height);
	uint8_t* scaledFrame_buffer = (uint8_t *) av_malloc( scaledFrame_buf_size * sizeof( uint8_t ) );
	avpicture_fill( (AVPicture*) scaledFrame, scaledFrame_buffer, PIX_FMT_YUV420P, dest_width, dest_height);
	if(!video_outbuf || !scaledFrame_buffer) {
		fprintf(stderr, "INIT: Memory error alloc video_outbuf!!!\n");
		return -1;
	}
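
	// scaledFrame and scaledFrame_buffer back the swscale output used further below
	// whenever the encoder resolution differs from the input stream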

	//allocate Napa-Wine transport
	frame = (Frame *)malloc(sizeof(Frame));
	if(!frame) {
		fprintf(stderr, "INIT: Memory error alloc Frame!!!\n");
		return -1;
	}
	//create an empty first video chunk
	chunk = (ExternalChunk *)malloc(sizeof(ExternalChunk));
	if(!chunk) {
		fprintf(stderr, "INIT: Memory error alloc chunk!!!\n");
		return -1;
	}
	chunk->data = NULL;
	chunk->seq = 0;
	//initChunk(chunk, &seq_current_chunk); if i init them now i get out of sequence
	dcprintf(DEBUG_CHUNKER, "INIT: chunk video %d\n", chunk->seq);
	//create empty first audio chunk
	chunkaudio = (ExternalChunk *)malloc(sizeof(ExternalChunk));
	if(!chunkaudio) {
		fprintf(stderr, "INIT: Memory error alloc chunkaudio!!!\n");
		return -1;
	}
	chunkaudio->data=NULL;
	chunkaudio->seq = 0;
	//initChunk(chunkaudio, &seq_current_chunk);
	dcprintf(DEBUG_CHUNKER, "INIT: chunk audio %d\n", chunkaudio->seq);

#ifdef HTTPIO
	/* initialize the HTTP chunk pusher */
	initChunkPusher(); //TRIPLO
#endif

	long sleep=0;
	struct timeval now_tv;
	struct timeval tmp_tv;
	long long lateTime = 0;
	long long maxAudioInterval = 0;
	long long maxVDecodeTime = 0;
//	unsigned char lastIFrameDistance = 0;

#ifdef TCPIO
	static char peer_ip[16];
	static int peer_port;
	int res = sscanf(outside_world_url, "tcp://%15[0-9.]:%d", peer_ip, &peer_port);
	if (res < 2) {
		fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
		return -2;
	}

	initTCPPush(peer_ip, peer_port);
#endif
#ifdef UDPIO
	static char peer_ip[16];
	static int peer_port;
	int res = sscanf(outside_world_url, "udp://%15[0-9.]:%d", peer_ip, &peer_port);
	if (res < 2) {
		fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
		return -2;
	}

	initUDPPush(peer_ip, peer_port);
#endif
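
	// outside_world_url is expected to look like tcp://<dotted-IPv4>:<port> or
	// udp://<dotted-IPv4>:<port>, matching the sscanf formats above
	// (e.g. tcp://127.0.0.1:6666 -- address and port here are only an example)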

	char videotrace_filename[255];
	char psnr_filename[255];
	sprintf(videotrace_filename, "yuv_data/videotrace.log");
	sprintf(psnr_filename, "yuv_data/psnrtrace.log");
	FILE* videotrace = fopen(videotrace_filename, "w");
	FILE* psnrtrace = fopen(psnr_filename, "w");

#ifdef USE_AVFILTER
	//init AVFilter
	avfilter_register_all();
	init_filters(avfilter, pCodecCtx);
#endif

	//main loop to read from the input file
	while((av_read_frame(pFormatCtx, &packet)>=0) && !quit)
	{
		//detect if a strange number of anomalies is occurring
		if(ptsvideo1 < 0 || ptsvideo1 > packet.dts || ptsaudio1 < 0 || ptsaudio1 > packet.dts) {
			pts_anomalies_counter++;
			dcprintf(DEBUG_ANOMALIES, "READLOOP: pts BASE anomaly detected number %d\n", pts_anomalies_counter);
			if(pts_anomaly_threshold >=0 && live_source) { //reset just in case of live source
				if(pts_anomalies_counter > pts_anomaly_threshold) {
					dcprintf(DEBUG_ANOMALIES, "READLOOP: too many pts BASE anomalies. resetting pts base\n");
					av_free_packet(&packet);
					goto close;
				}
			}
		}

		//newTime_video and _audio are in usec
		//if video and audio stamps differ more than 5sec
		if( newTime_video - newTime_audio > 5000000 || newTime_video - newTime_audio < -5000000 ) {
			newtime_anomalies_counter++;
			dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME audio video differ anomaly detected number %d\n", newtime_anomalies_counter);
		}

		if(newtime_anomaly_threshold >=0 && newtime_anomalies_counter > newtime_anomaly_threshold) {
			if(live_source) { //restart just in case of live source
				dcprintf(DEBUG_ANOMALIES, "READLOOP: too many NEGATIVE TIMESTAMPS anomalies. Restarting.\n");
				av_free_packet(&packet);
				goto close;
			}
		}

    
710
                // Is this a packet from the video stream?
711
                if(packet.stream_index==videoStream)
712
                {
713
                        if(!live_source)
714
                        {
715
                                if(audioStream != -1) { //take this "time bank" method into account only if we have audio track
716
                                        // lateTime < 0 means a positive time account that can be used to decode video frames
717
                                        // if (lateTime + maxVDecodeTime) >= 0 then we may have a negative time account after video transcoding
718
                                        // therefore, it's better to skip the frame
719
                                        if(timebank && (lateTime+maxVDecodeTime) >= 0)
720
                                        {
721
                                                dcprintf(DEBUG_ANOMALIES, "\n\n\t\t************************* SKIPPING VIDEO FRAME %ld ***********************************\n\n", sleep);
722
                                                av_free_packet(&packet);
723
                                                continue;
724
                                        }
725
                                }
726
                        }
727
                        
728
                        gettimeofday(&tmp_tv, NULL);
729
                        
730
                        //decode the video packet into a raw pFrame
731
                        if(avcodec_decode_video2(pCodecCtx, pFrame1, &frameFinished, &packet)>0)
732
                        {
733
                                AVFrame *pFrame;
734
                                pFrame = pFrame1;
735

    
736
                                // usleep(5000);
737
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOin pkt: dts %lld pts %lld pts-dts %lld\n", packet.dts, packet.pts, packet.pts-packet.dts );
738
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %lld pkt_pts %lld frame.pts %lld\n", pFrame->pkt_dts, pFrame->pkt_pts, pFrame->pts);
739
                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame->pict_type, pFrame->key_frame ? " (key)" : "");
740
                                if(frameFinished)
741
                                { // it must be true all the time else error
742
                                
743
                                        frame->number = ++contFrameVideo;
744

    
745
#ifdef VIDEO_DEINTERLACE
746
                                if (!vcopy) {
747
                                        avpicture_deinterlace(
748
                                                (AVPicture*) pFrame,
749
                                                (const AVPicture*) pFrame,
750
                                                pCodecCtxEnc->pix_fmt,
751
                                                pCodecCtxEnc->width,
752
                                                pCodecCtxEnc->height);
753
                                }
754
#endif
755

    
756

    
757
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: finished frame %d dts %lld pts %lld\n", frame->number, packet.dts, packet.pts);
758
                                        if(frame->number==0) {
759
                                                if(packet.dts==AV_NOPTS_VALUE)
760
                                                {
761
                                                        //a Dts with a noPts value is troublesome case for delta calculation based on Dts
762
                                                        contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
763
                                                        av_free_packet(&packet);
764
                                                        continue;
765
                                                }
766
                                                last_pkt_dts = packet.dts;
767
                                                newTime = 0;
768
                                        }
769
                                        else {
770
                                                if(packet.dts!=AV_NOPTS_VALUE) {
771
                                                        delta_video = packet.dts-last_pkt_dts;
772
                                                        last_pkt_dts = packet.dts;
773
                                                }
774
                                                else if(delta_video==0)
775
                                                {
776
                                                        //a Dts with a noPts value is troublesome case for delta calculation based on Dts
777
                                                        contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
778
                                                        av_free_packet(&packet);
779
                                                        continue;
780
                                                }
781
                                        }
782
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: deltavideo : %d\n", (int)delta_video);
783

    
784
                                        if(vcopy) {
785
                                                video_frame_size = packet.size;
786
                                                if (video_frame_size > video_outbuf_size) {
787
                                                        fprintf(stderr, "VIDEO: error, outbuf too small, SKIPPING\n");;
788
                                                        av_free_packet(&packet);
789
                                                        continue;
790
                                                } else {
791
                                                        memcpy(video_outbuf, packet.data, video_frame_size);
792
                                                }
793

    
794
                                                if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
795
                                                        target_pts = pFrame->pkt_pts;
796
                                                }else {        //TODO: review this
797
                                                        target_pts = pFrame->pkt_dts;
798
                                                }
799
                                        } else {
800

    
801
                                            if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
802
                                                pFrame->pts = av_rescale_q(pFrame->pkt_pts, pFormatCtx->streams[videoStream]->time_base, pCodecCtxEnc->time_base);
803
                                            } else {        //try to figure out the pts //TODO: review this
804
                                                if (pFrame->pkt_dts != AV_NOPTS_VALUE) {
805
                                                        pFrame->pts = av_rescale_q(pFrame->pkt_dts, pFormatCtx->streams[videoStream]->time_base, pCodecCtxEnc->time_base);
806
                                                }
807
                                            }
808

    
809
#ifdef USE_AVFILTER
810
                                        //apply avfilters
811
                                        filter(pFrame,pFrame2);
812
                                        pFrame = pFrame2;
813
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %lld pkt_pts %lld frame.pts %lld\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
814
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
815
#endif
816

    
817
                                            if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
818
//                                                static AVPicture pict;
819
                                                static struct SwsContext *img_convert_ctx = NULL;
820
                                                
821
                                                pFrame->pict_type = 0;
822
                                                if(img_convert_ctx == NULL)
823
                                                {
824
                                                        img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, dest_width, dest_height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
825
                                                        if(img_convert_ctx == NULL) {
826
                                                                fprintf(stderr, "Cannot initialize the conversion context!\n");
827
                                                                exit(1);
828
                                                        }
829
                                                }
830
                                                sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, scaledFrame->data, scaledFrame->linesize);
831
                                                scaledFrame->pts = pFrame->pts;
832
                                                scaledFrame->pict_type = 0;
833
                                                video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, scaledFrame);
834
                                            } else {
835
                                                pFrame->pict_type = 0;
836
                                                video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, pFrame);
837
                                            }
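						// the swscale path above is taken only when the configured encoder
						// resolution differs from the input; otherwise the decoded frame is
						// passed straight to avcodec_encode_video()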

						//use pts if dts is invalid
						if(pCodecCtxEnc->coded_frame->pts!=AV_NOPTS_VALUE)
							target_pts = av_rescale_q(pCodecCtxEnc->coded_frame->pts, pCodecCtxEnc->time_base, pFormatCtx->streams[videoStream]->time_base);
						else {	//TODO: review this
							av_free_packet(&packet);
							continue;
							//fprintf(stderr, "VIDEOout: pts error\n");
							//exit(1);
						}
					}

					if(video_frame_size <= 0)
					{
						contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
						av_free_packet(&packet);
						continue;
					}

					if(!vcopy && pCodecCtxEnc->coded_frame) {
						dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: pkt_dts %lld pkt_pts %lld frame.pts %lld\n", pCodecCtxEnc->coded_frame->pkt_dts, pCodecCtxEnc->coded_frame->pkt_pts, pCodecCtxEnc->coded_frame->pts);
						dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: outtype: %d%s\n", pCodecCtxEnc->coded_frame->pict_type, pCodecCtxEnc->coded_frame->key_frame ? " (key)" : "");
					}
#ifdef DISPLAY_PSNR
					static double ist_psnr = 0;
					static double cum_psnr = 0;
					static int psnr_samples = 0;
					if(!vcopy && pCodecCtxEnc->coded_frame) {
						if(pCodecCtxEnc->flags&CODEC_FLAG_PSNR) {
							ist_psnr = GET_PSNR(pCodecCtxEnc->coded_frame->error[0]/(pCodecCtxEnc->width*pCodecCtxEnc->height*255.0*255.0));
							psnr_samples++;
							cum_psnr += ist_psnr;
							fprintf(stderr, "PSNR: ist %.4f avg: %.4f\n", ist_psnr, cum_psnr / (double)psnr_samples);
						}
					}
#endif

					if(offset_av)
					{
						if(FirstTimeVideo && target_pts>0) {
							ptsvideo1 = target_pts;
							FirstTimeVideo = 0;
							dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: SET PTS BASE OFFSET %lld\n", ptsvideo1);
						}
					}
					else //we want to compensate audio and video offset for this source
					{
						if(FirstTimeVideo && target_pts>0) {
							//maintain the offset between audio pts and video pts
							//because in case of live source they have the same numbering
							if(ptsaudio1 > 0) //if we have already seen some audio frames...
								ptsvideo1 = ptsaudio1;
							else
								ptsvideo1 = target_pts;
							FirstTimeVideo = 0;
							dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO LIVE: SET PTS BASE OFFSET %lld\n", ptsvideo1);
						}
					}
					//compute the new video timestamp in milliseconds
					if(frame->number>0) {
						newTime = (target_pts - ptsvideo1) * 1000 * pFormatCtx->streams[videoStream]->time_base.num / pFormatCtx->streams[videoStream]->time_base.den;
						// store timestamp in useconds for next frame sleep
						newTime_video = newTime*1000;
					}
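					// newTime is the frame timestamp in milliseconds relative to the ptsvideo1
					// base (rescaled through the stream time_base); newTime_video keeps the same
					// value in microseconds for the frame-pacing logic further below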
902
                                        dcprintf(DEBUG_TIMESTAMPING, "VIDEO: NEWTIMESTAMP %ld\n", newTime);
903
                                        if(newTime<0) {
904
                                                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: SKIPPING FRAME\n");
905
                                                newtime_anomalies_counter++;
906
                                                dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME negative video timestamp anomaly detected number %d\n", newtime_anomalies_counter);
907
                                                contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
908
                                                av_free_packet(&packet);
909
                                                continue; //SKIP THIS FRAME, bad timestamp
910
                                        }
911
                                        
912
                                        //~ printf("pCodecCtxEnc->error[0]=%lld\n", pFrame->error[0]);
913
        
914
                                        frame->timestamp.tv_sec = (long long)newTime/1000;
915
                                        frame->timestamp.tv_usec = newTime%1000;
916
                                        frame->size = video_frame_size;
917
                                        /* pict_type maybe 1 (I), 2 (P), 3 (B), 5 (AUDIO)*/
918
                                        frame->type = vcopy ? pFrame->pict_type : (unsigned char)pCodecCtxEnc->coded_frame->pict_type;
919

    
920
                                        if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: original codec frame number %d vs. encoded %d vs. packed %d\n", pCodecCtx->frame_number, pCodecCtxEnc->frame_number, frame->number);
921
                                        if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: duration %d timebase %d %d container timebase %d\n", (int)packet.duration, pCodecCtxEnc->time_base.den, pCodecCtxEnc->time_base.num, pCodecCtx->time_base.den);
922

    
923
#ifdef YUV_RECORD_ENABLED
924
                                        if(!vcopy && ChunkerStreamerTestMode)
925
                                        {
926
                                                if(videotrace)
927
                                                        fprintf(videotrace, "%d %d %d\n", frame->number, pFrame->pict_type, frame->size);
928

    
929
                                                if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width)
930
                                                        SaveFrame(scaledFrame, dest_width, dest_height);
931
                                                else
932
                                                        SaveFrame(pFrame, dest_width, dest_height);
933

    
934
                                                ++savedVideoFrames;
935
                                                SaveEncodedFrame(frame, video_outbuf);
936

    
937
                                                if(!firstSavedVideoFrame)
938
                                                        firstSavedVideoFrame = frame->number;
939
                                                
940
                                                char tmp_filename[255];
941
                                                sprintf(tmp_filename, "yuv_data/streamer_out_context.txt");
942
                                                FILE* tmp = fopen(tmp_filename, "w");
943
                                                if(tmp)
944
                                                {
945
                                                        fprintf(tmp, "width = %d\nheight = %d\ntotal_frames_saved = %d\ntotal_frames_decoded = %d\nfirst_frame_number = %ld\nlast_frame_number = %d\n"
946
                                                                ,dest_width, dest_height
947
                                                                ,savedVideoFrames, savedVideoFrames, firstSavedVideoFrame, frame->number);
948
                                                        fclose(tmp);
949
                                                }
950
                                        }
951
#endif
952

    
953
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: encapsulated frame size:%d type:%d\n", frame->size, frame->type);
954
                                        dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: timestamped sec %d usec:%d\n", frame->timestamp.tv_sec, frame->timestamp.tv_usec);
955
                                        //contFrameVideo++; //lets increase the numbering of the frames
956

    
957
                                        if(update_chunk(chunk, frame, video_outbuf) == -1) {
958
                                                fprintf(stderr, "VIDEO: unable to update chunk %d. Exiting.\n", chunk->seq);
959
                                                exit(-1);
960
                                        }
961

    
962
                                        if(chunkFilled(chunk, VIDEO_CHUNK)) { // is chunk filled using current strategy?
963
                                                //calculate priority
964
                                                chunk->priority /= chunk->frames_num;
965

    
966
                                                //SAVE ON FILE
967
                                                //saveChunkOnFile(chunk);
968
                                                //Send the chunk to an external transport/player
969
                                                sendChunk(chunk);
970
                                                dcprintf(DEBUG_CHUNKER, "VIDEO: sent chunk video %d, prio:%f\n", chunk->seq, chunk->priority);
971
                                                chunk->seq = 0; //signal that we need an increase
972
                                                //initChunk(chunk, &seq_current_chunk);
973
                                        }
974

    
975
                                        //compute how long it took to encode video frame
976
                                        gettimeofday(&now_tv, NULL);
977
                                        long long usec = (now_tv.tv_sec-tmp_tv.tv_sec)*1000000;
978
                                        usec+=(now_tv.tv_usec-tmp_tv.tv_usec);
979
                                        if(usec > maxVDecodeTime)
980
                                                maxVDecodeTime = usec;
981

    
982
                                        //we DONT have an audio track, so we compute timings and determine
983
                                        //how much time we have to sleep at next VIDEO frame taking
984
                                        //also into account how much time was needed to encode the current
985
                                        //video frame
986
                                        //all this in case the video source is not live, i.e. not self-timing
987
                                        //and only in case there is no audio track
988
                                        if(audioStream == -1) {
989
                                                if(!live_source) {
990
                                                        if(newTime_prev != 0) {
991
                                                                //how much delay between video frames ideally
992
                                                                long long maxDelay = newTime_video - newTime_prev;
993
                                                                sleep = (maxDelay - usec);
994
                                                                dcprintf(DEBUG_ANOMALIES,"\tmaxDelay=%ld\n", ((long)maxDelay));
995
                                                                dcprintf(DEBUG_ANOMALIES,"\tlast video frame interval=%ld; sleep time=%ld\n", ((long)usec), ((long)sleep));
996
                                                        }
997
                                                        else
998
                                                                sleep = 0;
999

    
1000
                                                        //update and store counters
1001
                                                        newTime_prev = newTime_video;
1002

    
1003
                                                        //i can also sleep now instead of at the beginning of
1004
                                                        //the next frame because in this case we only have video
1005
                                                        //frames, hence it would immediately be the next thing to do
1006
                                                        if(sleep > 0) {
1007
                                                                dcprintf(DEBUG_ANOMALIES, "\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
1008
                                                                usleep(sleep);
1009
                                                        }
1010

    
1011
                                                }
1012
                                        }
1013

    
1014
                                }
1015
                        }
1016
                }
1017
                else if(packet.stream_index==audioStream)
1018
                {
1019
                        if(sleep > 0)
1020
                        {
1021
                                dcprintf(DEBUG_ANOMALIES, "\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
1022
                                usleep(sleep);
1023
                        }
			audio_data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			//decode the audio packet into a raw audio source buffer
			if(avcodec_decode_audio3(aCodecCtx, samples, &audio_data_size, &packet)>0)
			{
				dcprintf(DEBUG_AUDIO_FRAMES, "\n-------AUDIO FRAME\n");
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: newTimeaudioSTART : %lf\n", (double)(packet.pts)*av_q2d(pFormatCtx->streams[audioStream]->time_base));
				if(audio_data_size>0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: datasizeaudio:%d\n", audio_data_size);
					/* if a frame has been decoded, output it */
					//fwrite(samples, 1, audio_data_size, outfileaudio);
				}
				else {
					av_free_packet(&packet);
					continue;
				}
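
				//re-encode the raw samples with the output audio encoder;
				//avcodec_encode_audio() returns the number of bytes written into
				//audio_outbuf, so a non-positive value means no encoded frame was
				//produced and the packet is skipped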
				audio_frame_size = avcodec_encode_audio(aCodecCtxEnc, audio_outbuf, audio_data_size, samples);
				if(audio_frame_size <= 0) {
					av_free_packet(&packet);
					continue;
				}

				frame->number = contFrameAudio;

				if(frame->number==0) {
					if(packet.dts==AV_NOPTS_VALUE) {
						av_free_packet(&packet);
						continue;
					}
					last_pkt_dts_audio = packet.dts;
					newTime = 0;
				}
				else {
					if(packet.dts!=AV_NOPTS_VALUE) {
						delta_audio = packet.dts-last_pkt_dts_audio;
						last_pkt_dts_audio = packet.dts;
					}
					else if(delta_audio==0) {
						av_free_packet(&packet);
						continue;
					}
				}
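				//delta_audio remembers the last measured dts step between audio packets;
				//a packet with no dts is dropped unless a previous step is already known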
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: original codec frame number %d vs. encoded %d vs. packed %d\n", aCodecCtx->frame_number, aCodecCtxEnc->frame_number, frame->number);
				//use pts if dts is invalid
				if(packet.dts!=AV_NOPTS_VALUE)
					target_pts = packet.dts;
				else if(packet.pts!=AV_NOPTS_VALUE) {
					target_pts = packet.pts;
				} else {
					av_free_packet(&packet);
					continue;
				}

				if(offset_av)
				{
					if(FirstTimeAudio && packet.dts>0) {
						ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SET PTS BASE OFFSET %lld\n", ptsaudio1);
					}
				}
				else //we want to compensate audio and video offset for this source
				{
					if(FirstTimeAudio && packet.dts>0) {
						//maintain the offset between audio pts and video pts
						//because in case of live source they have the same numbering
						if(ptsvideo1 > 0) //if we have already seen some video frames...
							ptsaudio1 = ptsvideo1;
						else
							ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO LIVE: SET PTS BASE OFFSET %f\n", ptsaudio1);
					}
				}
				//compute the new audio timestamps in milliseconds
				if(frame->number>0) {
					newTime = ((target_pts-ptsaudio1)*1000.0*((double)av_q2d(pFormatCtx->streams[audioStream]->time_base)));//*(double)delta_audio;
					// store timestamp in useconds for next frame sleep
					newTime_audio = newTime*1000;
				}
				dcprintf(DEBUG_TIMESTAMPING, "AUDIO: NEWTIMESTAMP %d\n", newTime);
				if(newTime<0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SKIPPING FRAME\n");
					newtime_anomalies_counter++;
					dcprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME negative audio timestamp anomaly detected number %d\n", newtime_anomalies_counter);
					av_free_packet(&packet);
					continue; //SKIP THIS FRAME, bad timestamp
				}

				frame->timestamp.tv_sec = (unsigned int)(newTime + delay_audio)/1000;
				frame->timestamp.tv_usec = (newTime + delay_audio)%1000;
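				//note: newTime and delay_audio are expressed in milliseconds, so tv_sec
				//holds whole seconds while tv_usec carries the millisecond remainder
				//rather than true microseconds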
				frame->size = audio_frame_size;
				frame->type = 5; // 5 is audio type
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: pts %lld duration %d timebase %d %d dts %lld\n", packet.pts, (int)packet.duration, pFormatCtx->streams[audioStream]->time_base.num, pFormatCtx->streams[audioStream]->time_base.den, packet.dts);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: timestamp sec:%d usec:%d\n", frame->timestamp.tv_sec, frame->timestamp.tv_usec);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: deltaaudio %lld\n", delta_audio);
				contFrameAudio++;

				if(update_chunk(chunkaudio, frame, audio_outbuf) == -1) {
					fprintf(stderr, "AUDIO: unable to update chunk %d. Exiting.\n", chunkaudio->seq);
					exit(-1);
				}
				//set priority
				chunkaudio->priority = 1;

				if(chunkFilled(chunkaudio, AUDIO_CHUNK)) {
					// is chunk filled using current strategy?
					//SAVE ON FILE
					//saveChunkOnFile(chunkaudio);
					//Send the chunk to an external transport/player
					sendChunk(chunkaudio);
					dcprintf(DEBUG_CHUNKER, "AUDIO: just sent chunk audio %d\n", chunkaudio->seq);
					chunkaudio->seq = 0; //signal that we need an increase
					//initChunk(chunkaudio, &seq_current_chunk);
				}

				//we have an audio track, so we compute timings and determine
				//how much time we have to sleep at next audio frame taking
				//also into account how much time was needed to encode the
				//video frames
				//all this in case the video source is not live, i.e. not self-timing
				if(!live_source)
				{
					if(newTime_prev != 0)
					{
						long long maxDelay = newTime_audio - newTime_prev;

						gettimeofday(&now_tv, NULL);
						long long usec = (now_tv.tv_sec-lastAudioSent.tv_sec)*1000000;
						usec+=(now_tv.tv_usec-lastAudioSent.tv_usec);

						if(usec > maxAudioInterval)
							maxAudioInterval = usec;
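
						//lateTime accumulates how far we are behind the audio timeline:
						//maxDelay is the ideal gap (in microseconds) between the last two
						//recomputed audio timestamps and usec is the real time elapsed
						//since the previous audio frame was sent; only when we are ahead
						//by more than maxAudioInterval is the excess turned into a sleep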
						lateTime -= (maxDelay - usec);
						dcprintf(DEBUG_ANOMALIES,"\tmaxDelay=%ld, maxAudioInterval=%ld\n", ((long)maxDelay), ((long) maxAudioInterval));
						dcprintf(DEBUG_ANOMALIES,"\tlast audio frame interval=%ld; lateTime=%ld\n", ((long)usec), ((long)lateTime));

						if((lateTime+maxAudioInterval) < 0)
							sleep = (lateTime+maxAudioInterval)*-1;
						else
							sleep = 0;
					}
					else
						sleep = 0;

					newTime_prev = newTime_audio;
					gettimeofday(&lastAudioSent, NULL);
				}
			}
		}
		dcprintf(DEBUG_CHUNKER,"Free the packet that was allocated by av_read_frame\n");
		av_free_packet(&packet);
	}

	if(videotrace)
		fclose(videotrace);
	if(psnrtrace)
		fclose(psnrtrace);

close:
	if(chunk->seq != 0 && chunk->frames_num>0) {
		//SAVE ON FILE
		//saveChunkOnFile(chunk);
		//Send the chunk to an external transport/player
		sendChunk(chunk);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST VIDEO CHUNK\n");
		chunk->seq = 0; //signal that we need an increase just in case we will restart
	}
	if(chunkaudio->seq != 0 && chunkaudio->frames_num>0) {
		//SAVE ON FILE
		//saveChunkOnFile(chunkaudio);
		//Send the chunk via http to an external transport/player
		sendChunk(chunkaudio);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST AUDIO CHUNK\n");
		chunkaudio->seq = 0; //signal that we need an increase just in case we will restart
	}

#ifdef HTTPIO
	/* finalize the HTTP chunk pusher */
	finalizeChunkPusher();
#endif

	free(chunk);
	free(chunkaudio);
	free(frame);
	av_free(video_outbuf);
	av_free(scaledFrame_buffer);
	av_free(audio_outbuf);
	free(cmeta);

	// Free the YUV frame
	av_free(pFrame1);
	av_free(pFrame2);
	av_free(scaledFrame);
	av_free(samples);

	// Close the codec
	if (!vcopy) avcodec_close(pCodecCtx);
	if (!vcopy) avcodec_close(pCodecCtxEnc);

	if(audioStream!=-1) {
		avcodec_close(aCodecCtx);
		avcodec_close(aCodecCtxEnc);
	}

	// Close the video file
	av_close_input_file(pFormatCtx);

	if(LOOP_MODE) {
		//we want video to continue, but av_read_frame stopped:
		//let's wait 5 seconds and cycle in again
		usleep(5000000);
		dcprintf(DEBUG_CHUNKER, "CHUNKER: WAITING 5 secs FOR LIVE SOURCE TO SKIP ERRORS AND RESTARTING\n");
		videoStream = -1;
		audioStream = -1;
		FirstTimeAudio=1;
		FirstTimeVideo=1;
		pts_anomalies_counter=0;
		newtime_anomalies_counter=0;
		newTime=0;
		newTime_audio=0;
		newTime_prev=0;
		ptsvideo1=0;
		ptsaudio1=0;
		last_pkt_dts=0;
		delta_video=0;
		delta_audio=0;
		last_pkt_dts_audio=0;
		target_pts=0;
		i=0;
		//~ contFrameVideo = 0;
		//~ contFrameAudio = 1;

#ifdef YUV_RECORD_ENABLED
		if(ChunkerStreamerTestMode)
		{
			video_record_count++;
			//~ savedVideoFrames = 0;

			//~ char tmp_filename[255];
			//~ sprintf(tmp_filename, "yuv_data/out_%d.yuv", video_record_count);
			//~ FILE *pFile=fopen(tmp_filename, "w");
			//~ if(pFile!=NULL)
				//~ fclose(pFile);
		}
#endif

		goto restart;
	}

#ifdef TCPIO
	finalizeTCPChunkPusher();
#endif

#ifdef USE_AVFILTER
	close_filters();
#endif

	return 0;
}

int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf) {
	//the Frame header gets encoded into 5 slots of 32 bits (3 ints plus 2 more for the timeval struct)
	static int sizeFrameHeader = 5*sizeof(int32_t);
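
	//resulting on-wire layout: a 20-byte header of five 32-bit network-order
	//fields, in this order:
	//  number | timestamp.tv_sec | timestamp.tv_usec | size | type
	//followed by the encoded frame payload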

	//moving temp pointer to encode Frame on the wire
	uint8_t *tempdata = NULL;

	if(chunk->seq == 0) {
		initChunk(chunk, &seq_current_chunk);
	}
	//add frame priority to chunk priority (to be normalized later on)
	chunk->priority += frame->type + 1; // I:2, P:3, B:4

	//HINT on malloc
	//grow the chunk buffer via a temporary pointer so the old buffer is not
	//leaked if realloc fails
	uint8_t *newdata = (uint8_t *)realloc(chunk->data, sizeof(uint8_t)*(chunk->payload_len + frame->size + sizeFrameHeader));
	if(!newdata)  {
		fprintf(stderr, "Memory error in chunk!!!\n");
		return -1;
	}
	chunk->data = newdata;
	chunk->frames_num++; // number of frames in the current chunk

/*
	//package the Frame header
	tempdata = chunk->data+chunk->payload_len;
	*((int32_t *)tempdata) = frame->number;
	tempdata+=sizeof(int32_t);
	*((struct timeval *)tempdata) = frame->timestamp;
	tempdata+=sizeof(struct timeval);
	*((int32_t *)tempdata) = frame->size;
	tempdata+=sizeof(int32_t);
	*((int32_t *)tempdata) = frame->type;
	tempdata+=sizeof(int32_t);
*/
	//package the Frame header: network order and platform independent
	tempdata = chunk->data+chunk->payload_len;
	bit32_encoded_push(frame->number, tempdata);
	bit32_encoded_push(frame->timestamp.tv_sec, tempdata + CHUNK_TRANSCODING_INT_SIZE);
	bit32_encoded_push(frame->timestamp.tv_usec, tempdata + CHUNK_TRANSCODING_INT_SIZE*2);
	bit32_encoded_push(frame->size, tempdata + CHUNK_TRANSCODING_INT_SIZE*3);
	bit32_encoded_push(frame->type, tempdata + CHUNK_TRANSCODING_INT_SIZE*4);

	//insert the new frame data
	memcpy(chunk->data + chunk->payload_len + sizeFrameHeader, outbuf, frame->size);
	chunk->payload_len += frame->size + sizeFrameHeader; // update payload length
	//chunk length is updated just prior to pushing it out because
	//the chunk header len is better calculated there
	//chunk->len = sizeChunkHeader + chunk->payload_len; // update overall length

	//update timestamps
	if(((int)frame->timestamp.tv_sec < (int)chunk->start_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->start_time.tv_sec && (int)frame->timestamp.tv_usec < (int)chunk->start_time.tv_usec) || (int)chunk->start_time.tv_sec==-1) {
		chunk->start_time.tv_sec = frame->timestamp.tv_sec;
		chunk->start_time.tv_usec = frame->timestamp.tv_usec;
	}

	if(((int)frame->timestamp.tv_sec > (int)chunk->end_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->end_time.tv_sec && (int)frame->timestamp.tv_usec > (int)chunk->end_time.tv_usec) || (int)chunk->end_time.tv_sec==-1) {
		chunk->end_time.tv_sec = frame->timestamp.tv_sec;
		chunk->end_time.tv_usec = frame->timestamp.tv_usec;
	}
	return 0;
}
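
/*
 * For reference only: a minimal sketch of how a receiver could unpack one frame
 * header written by update_chunk() above. bit32_encoded_pull() is a hypothetical
 * counterpart of bit32_encoded_push() and is NOT defined in this file; only the
 * field order and sizes are taken from the code above.
 *
 *	uint8_t *p = chunk->data + frame_offset;
 *	frame.number            = bit32_encoded_pull(p); p += CHUNK_TRANSCODING_INT_SIZE;
 *	frame.timestamp.tv_sec  = bit32_encoded_pull(p); p += CHUNK_TRANSCODING_INT_SIZE;
 *	frame.timestamp.tv_usec = bit32_encoded_pull(p); p += CHUNK_TRANSCODING_INT_SIZE;
 *	frame.size              = bit32_encoded_pull(p); p += CHUNK_TRANSCODING_INT_SIZE;
 *	frame.type              = bit32_encoded_pull(p); p += CHUNK_TRANSCODING_INT_SIZE;
 *	//the frame payload then follows for frame.size bytes
 */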

void SaveFrame(AVFrame *pFrame, int width, int height)
{
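	//appends one raw planar YUV 4:2:0 frame (full-resolution Y plane followed by
	//half-resolution U and V planes) to yuv_data/streamer_out.yuv, reading each
	//plane with its own linesize stride so padding bytes are not written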
	FILE *pFile;
	int y;

	// Open file
	char tmp_filename[255];
	sprintf(tmp_filename, "yuv_data/streamer_out.yuv");
	pFile=fopen(tmp_filename, "ab");
	if(pFile==NULL)
		return;

	// Write header
	//fprintf(pFile, "P5\n%d %d\n255\n", width, height);

	// Write Y data
	for(y=0; y<height; y++)
		if(fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width, pFile) != width)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write U data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[1]+y*pFrame->linesize[1], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write V data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[2]+y*pFrame->linesize[2], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}

	// Close file
	fclose(pFile);
}

void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf)
{
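	//appends the in-memory Frame struct verbatim followed by the encoded payload;
	//unlike the network-order header built in update_chunk(), this raw struct dump
	//is platform-dependent (struct padding and byte order)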
	static FILE* pFile = NULL;

	pFile=fopen("yuv_data/streamer_out.mpeg4", "ab");
	if(pFile==NULL)
		return;
	fwrite(frame, sizeof(Frame), 1, pFile);
	fwrite(video_outbuf, frame->size, 1, pFile);
	fclose(pFile);
}