
chunker-player / chunker_streamer / chunker_streamer.c @ ee1a01d4


/*
 *  Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
 *  Copyright (c) 2010-2011 Csaba Kiraly
 *  developed for the Napa-Wine EU project. See www.napa-wine.eu
 *
 *  This is free software; see lgpl-2.1.txt
 */

#include "chunker_streamer.h"
#include <signal.h>
#include <math.h>
#include <getopt.h>
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include "libav-compat.h"

#ifdef USE_AVFILTER
#include <libavfilter/avfilter.h>
#include "chunker_filtering.h"
#endif

#include "chunk_pusher.h"

struct outstream {
    struct output *output;
    ExternalChunk *chunk;
    AVCodecContext *pCodecCtxEnc;
};
#define QUALITYLEVELS_MAX 9
struct outstream outstream[1+QUALITYLEVELS_MAX+1];
int qualitylevels = 1;
int indexchannel = 0;
int passthrough = 0;
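/* outstream[] layout, as used by the initialization loops in main():
 *   [0]                                -> passthrough copy of the input (only if passthrough is set)
 *   [passthrough .. passthrough+qualitylevels-1] -> transcoded quality levels
 *   [passthrough + qualitylevels]      -> low-resolution index channel (only if indexchannel is set)
 * This is why the array above is sized 1+QUALITYLEVELS_MAX+1. */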

#define DEBUG
#define DEBUG_AUDIO_FRAMES  false
#define DEBUG_VIDEO_FRAMES  false
#define DEBUG_CHUNKER false
#define DEBUG_ANOMALIES true
#define DEBUG_TIMESTAMPING false
#include "dbg.h"

#define STREAMER_MAX(a,b) ((a>b)?(a):(b))
#define STREAMER_MIN(a,b) ((a<b)?(a):(b))
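/* Note: the arguments of the two macros above are not individually parenthesized,
 * so they are only safe with simple expressions, as in the calls in this file. */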

//#define DISPLAY_PSNR
#define GET_PSNR(x) ((x==0) ? 0 : (-10.0*log(x)/log(10)))
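/* GET_PSNR() expects a mean squared error already normalized by 255^2 (see the
 * DISPLAY_PSNR block in transcodeFrame()) and returns the PSNR in dB; for example,
 * a normalized MSE of 0.001 gives -10*log10(0.001) = 30 dB. */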

ChunkerMetadata *cmeta = NULL;
int seq_current_chunk = 1; //chunk numbering starts from 1; HINT do i need more bytes?

#define AUDIO_CHUNK 0
#define VIDEO_CHUNK 1

void SaveFrame(AVFrame *pFrame, int width, int height);
void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf);
int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf);
void bit32_encoded_push(uint32_t v, uint8_t *p);

int video_record_count = 0;
int savedVideoFrames = 0;
long int firstSavedVideoFrame = 0;
int ChunkerStreamerTestMode = 0;

int pts_anomaly_threshold = -1;
int newtime_anomaly_threshold = -1;
bool timebank = false;
char *outside_world_url = NULL;

int gop_size = 25;
int max_b_frames = 3;
bool vcopy = false;

long delay_audio = 0; //delay audio by x millisec

char *avfilter="yadif";

// Constant number of frames per chunk
int chunkFilledFramesStrategy(ExternalChunk *echunk, int chunkType)
{
    dcprintf(DEBUG_CHUNKER, "CHUNKER: check if frames num %d == %d in chunk %d\n", echunk->frames_num, cmeta->framesPerChunk[chunkType], echunk->seq);
    if(echunk->frames_num == cmeta->framesPerChunk[chunkType])
        return 1;

    return 0;
}

// Constant size. Note that for now each chunk will have a size just greater than or equal to the required value.
// It can be considered as constant size.
int chunkFilledSizeStrategy(ExternalChunk *echunk, int chunkType)
{
    dcprintf(DEBUG_CHUNKER, "CHUNKER: check if chunk size %d >= %d in chunk %d\n", echunk->payload_len, cmeta->targetChunkSize, echunk->seq);
    if(echunk->payload_len >= cmeta->targetChunkSize)
        return 1;

    return 0;
}

// Performance optimization.
// The chunkFilled function has been split into two functions (one for each strategy).
// Instead of continuously checking the strategy flag (which is constant),
// we change the callback just once according to the current strategy (see the switch statement in main() where this function pointer is set).
int (*chunkFilled)(ExternalChunk *echunk, int chunkType);

void initChunk(ExternalChunk *chunk, int *seq_num) {
    chunk->seq = (*seq_num)++;
    chunk->frames_num = 0;
    chunk->payload_len = 0;
    chunk->len=0;
    if(chunk->data != NULL)
        free(chunk->data);
    chunk->data = NULL;
    chunk->start_time.tv_sec = -1;
    chunk->start_time.tv_usec = -1;
    chunk->end_time.tv_sec = -1;
    chunk->end_time.tv_usec = -1;
    chunk->priority = 0;
    chunk->category = 0;
    chunk->_refcnt = 0;
}

int quit = 0;

void sigproc()
{
    printf("you have pressed ctrl-c, terminating...\n");
    quit = 1;
}

static void print_usage(int argc, char *argv[])
{
    fprintf (stderr,
        "\nUsage:%s [options]\n"
        "\n"
        "Mandatory options:\n"
        "\t[-i input file]\n"
        "\t[-a audio bitrate]\n"
        "\t[-v video bitrate]\n\n"
        "Other options:\n"
        "\t[-F output] (overrides config file)\n"
        "\t[-A audioencoder]\n"
        "\t[-V videoencoder]\n"
        "\t[-s WxH]: force video size.\n"
        "\t[-l]: this is a live stream.\n"
        "\t[-o]: adjust A/V frame timestamps (default off, use it only with flawed containers)\n"
        "\t[-p]: pts anomaly threshold (default: -1=off).\n"
        "\t[-q]: sync anomaly threshold (default: -1=off).\n"
        "\t[-t]: QoE test mode\n\n"

        "\t[--video_stream]: set video_stream ID in input\n"
        "\t[--audio_stream]: set audio_stream ID in input\n"
        "\t[--avfilter]: set input filter (default: yadif)\n"
        "\t[--passthrough 0/1]: turn off/on generation of passthrough channel\n"
        "\t[--indexchannel 0/1]: turn off/on generation of index channel\n"
        "\t[--qualitylevels q]: set number of quality levels to q\n"
        "\n"
        "Codec options:\n"
        "\t[-g GOP]: gop size\n"
        "\t[-b frames]: max number of consecutive b frames\n"
        "\t[-x extras]: extra video codec options (e.g. -x me_method=hex,flags2=+dct8x8+wpred+bpyramid+mixed_refs)\n"
        "\n"
        "=======================================================\n", argv[0]
    );
}

int sendChunk(struct output *output, ExternalChunk *chunk) {
#ifdef HTTPIO
    return pushChunkHttp(chunk, outside_world_url);
#endif
#ifdef TCPIO
    return pushChunkTcp(output, chunk);
#endif
#ifdef UDPIO
    return pushChunkUDP(chunk);
#endif
}
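/* The transport used by sendChunk() is selected at compile time: exactly one of
 * HTTPIO, TCPIO or UDPIO is expected to be defined; with none of them defined the
 * function would fall off the end without returning a value. */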

/*
 * pre-process a video Frame with the configured filters (stateful!)
 * pFrame: next frame of the stream
 * returns: NULL on error, the same pointer if NOP, otherwise a newly allocated Frame
 */
AVFrame *preprocessFrame(AVFrame *pFrame) {
#ifdef USE_AVFILTER
    AVFrame *pFrame2 = NULL;
    pFrame2=avcodec_alloc_frame();
    if(pFrame2==NULL) {
        fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
        if(pFrame2) av_free(pFrame2);
        return NULL;
    }
#endif

#ifdef VIDEO_DEINTERLACE
    avpicture_deinterlace(
        (AVPicture*) pFrame,
        (const AVPicture*) pFrame,
        pCodecCtxEnc->pix_fmt,
        pCodecCtxEnc->width,
        pCodecCtxEnc->height);
#endif

#ifdef USE_AVFILTER
    //apply avfilters
    if (filter(pFrame,pFrame2) <= 0) {
        return NULL;
    }
    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOfilter: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOfilter intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
    return pFrame2;
#else
    return pFrame;
#endif
}


int transcodeFrame(uint8_t *video_outbuf, int video_outbuf_size, int64_t *target_pts, AVFrame *pFrame, AVRational time_base, AVCodecContext *pCodecCtx, AVCodecContext *pCodecCtxEnc)
{
    int video_frame_size = 0;
    AVFrame *scaledFrame = NULL;
    scaledFrame=avcodec_alloc_frame();
    if(scaledFrame==NULL) {
        fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
        if(scaledFrame) av_free(scaledFrame);
        return -1;
    }
    int scaledFrame_buf_size = avpicture_get_size( PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);
    uint8_t* scaledFrame_buffer = (uint8_t *) av_malloc( scaledFrame_buf_size * sizeof( uint8_t ) );
    avpicture_fill( (AVPicture*) scaledFrame, scaledFrame_buffer, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);
    if(!video_outbuf || !scaledFrame_buffer) {
        fprintf(stderr, "INIT: Memory error alloc video_outbuf!!!\n");
        return -1;
    }

    if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
//      static AVPicture pict;
        static struct SwsContext *img_convert_ctx = NULL;

        pFrame->pict_type = 0;
        img_convert_ctx = sws_getCachedContext(img_convert_ctx, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
        if(img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context!\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, scaledFrame->data, scaledFrame->linesize);
        scaledFrame->pts = pFrame->pts;
        scaledFrame->pict_type = 0;
        video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, scaledFrame);
    } else {
        pFrame->pict_type = 0;
        video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, pFrame);
    }

    //use pts if dts is invalid
    if(pCodecCtxEnc->coded_frame->pts!=AV_NOPTS_VALUE)
        *target_pts = av_rescale_q(pCodecCtxEnc->coded_frame->pts, pCodecCtxEnc->time_base, time_base);
    else {	//TODO: review this
        if(scaledFrame) av_free(scaledFrame);
        if(scaledFrame_buffer) av_free(scaledFrame_buffer);
        return -1;
    }

    if(video_frame_size > 0) {
        if(pCodecCtxEnc->coded_frame) {
            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pCodecCtxEnc->coded_frame->pkt_dts, pCodecCtxEnc->coded_frame->pkt_pts, pCodecCtxEnc->coded_frame->pts);
            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: outtype: %d%s\n", pCodecCtxEnc->coded_frame->pict_type, pCodecCtxEnc->coded_frame->key_frame ? " (key)" : "");
        }
#ifdef DISPLAY_PSNR
        static double ist_psnr = 0;
        static double cum_psnr = 0;
        static int psnr_samples = 0;
        if(pCodecCtxEnc->coded_frame) {
            if(pCodecCtxEnc->flags&CODEC_FLAG_PSNR) {
                ist_psnr = GET_PSNR(pCodecCtxEnc->coded_frame->error[0]/(pCodecCtxEnc->width*pCodecCtxEnc->height*255.0*255.0));
                psnr_samples++;
                cum_psnr += ist_psnr;
                fprintf(stderr, "PSNR: ist %.4f avg: %.4f\n", ist_psnr, cum_psnr / (double)psnr_samples);
            }
        }
#endif
    }

    if(scaledFrame) av_free(scaledFrame);
    if(scaledFrame_buffer) av_free(scaledFrame_buffer);
    return video_frame_size;
}
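/* transcodeFrame() returns the size in bytes of the encoded frame; this can be 0
 * while the encoder is still buffering (e.g. collecting B-frames) and negative on
 * allocation or timestamp errors. The caller in main() skips the frame on <= 0. */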


void createFrame(struct Frame *frame, long long newTime, int video_frame_size, int pict_type)
{

    frame->timestamp.tv_sec = (long long)newTime/1000;
    frame->timestamp.tv_usec = newTime%1000;
    frame->size = video_frame_size;
    /* pict_type may be 1 (I), 2 (P), 3 (B), 5 (AUDIO) */
    frame->type = pict_type;


/* should be somewhere else
//    if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: original codec frame number %d vs. encoded %d vs. packed %d\n", pCodecCtx->frame_number, pCodecCtxEnc->frame_number, frame->number);
//    if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: duration %d timebase %d %d container timebase %d\n", (int)packet.duration, pCodecCtxEnc->time_base.den, pCodecCtxEnc->time_base.num, pCodecCtx->time_base.den);

#ifdef YUV_RECORD_ENABLED
    if(!vcopy && ChunkerStreamerTestMode)
    {
        if(videotrace)
            fprintf(videotrace, "%d %d %d\n", frame->number, pict_type, frame->size);

        SaveFrame(pFrame, dest_width, dest_height);

        ++savedVideoFrames;
        SaveEncodedFrame(frame, video_outbuf);

        if(!firstSavedVideoFrame)
            firstSavedVideoFrame = frame->number;

        char tmp_filename[255];
        sprintf(tmp_filename, "yuv_data/streamer_out_context.txt");
        FILE* tmp = fopen(tmp_filename, "w");
        if(tmp)
        {
            fprintf(tmp, "width = %d\nheight = %d\ntotal_frames_saved = %d\ntotal_frames_decoded = %d\nfirst_frame_number = %ld\nlast_frame_number = %d\n"
                ,dest_width, dest_height
                ,savedVideoFrames, savedVideoFrames, firstSavedVideoFrame, frame->number);
            fclose(tmp);
        }
    }
#endif
*/

    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: encapsulated frame size:%d type:%d\n", frame->size, frame->type);
    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: timestamped sec %ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
}


void addFrameToOutstream(struct outstream *os, Frame *frame, uint8_t *video_outbuf)
{

    ExternalChunk *chunk = os->chunk;
    struct output *output = os->output;

    if(update_chunk(chunk, frame, video_outbuf) == -1) {
        fprintf(stderr, "VIDEO: unable to update chunk %d. Exiting.\n", chunk->seq);
        exit(-1);
    }

    if(chunkFilled(chunk, VIDEO_CHUNK)) { // is chunk filled using current strategy?
        //calculate priority
        chunk->priority /= chunk->frames_num;

        //SAVE ON FILE
        //saveChunkOnFile(chunk);
        //Send the chunk to an external transport/player
        sendChunk(output, chunk);
        dctprintf(DEBUG_CHUNKER, "VIDEO: sent chunk video %d, prio:%f, size %d\n", chunk->seq, chunk->priority, chunk->len);
        chunk->seq = 0; //signal that we need an increase
        //initChunk(chunk, &seq_current_chunk);
    }
}

long long pts2ms(int64_t pts, AVRational time_base)
{
    return pts * 1000 * time_base.num / time_base.den;
}
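/* pts2ms() converts a pts expressed in time_base units to milliseconds (integer
 * math, so the result truncates). For example, with the common 1/90000 MPEG-TS
 * time base, pts=90000 -> 90000 * 1000 * 1 / 90000 = 1000 ms. */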

AVCodecContext *openVideoEncoder(const char *video_codec, int video_bitrate, int dest_width, int dest_height, AVRational time_base, const char *codec_options) {

    AVCodec *pCodecEnc;
    AVCodecContext *pCodecCtxEnc;

    //setup video output encoder
    if (strcmp(video_codec, "copy") == 0) {
        return NULL;
    }

    pCodecEnc = avcodec_find_encoder_by_name(video_codec);
    if (pCodecEnc) {
        fprintf(stderr, "INIT: Setting VIDEO codecID to: %d\n",pCodecEnc->id);
    } else {
        fprintf(stderr, "INIT: Unknown OUT VIDEO codec: %s!\n", video_codec);
        return NULL; // Codec not found
    }

    pCodecCtxEnc=avcodec_alloc_context();
    pCodecCtxEnc->codec_type = CODEC_TYPE_VIDEO;
    pCodecCtxEnc->codec_id = pCodecEnc->id;

    pCodecCtxEnc->bit_rate = video_bitrate;
    //~ pCodecCtxEnc->qmin = 30;
    //~ pCodecCtxEnc->qmax = 30;
    //times 20 follows the defaults, was not needed in previous versions of libavcodec
//    pCodecCtxEnc->crf = 20.0f;
    // resolution must be a multiple of two
    pCodecCtxEnc->width = dest_width;
    pCodecCtxEnc->height = dest_height;
    // frames per second
    //~ pCodecCtxEnc->time_base= pCodecCtx->time_base;//(AVRational){1,25};
    //printf("pCodecCtx->time_base=%d/%d\n", pCodecCtx->time_base.num, pCodecCtx->time_base.den);
    pCodecCtxEnc->time_base= time_base;//(AVRational){1,25};
    pCodecCtxEnc->gop_size = gop_size; // emit one intra frame every gop_size frames
    pCodecCtxEnc->max_b_frames = max_b_frames;
    pCodecCtxEnc->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtxEnc->flags |= CODEC_FLAG_PSNR;
    //~ pCodecCtxEnc->flags |= CODEC_FLAG_QSCALE;

    //some generic quality tuning
    pCodecCtxEnc->mb_decision = FF_MB_DECISION_RD;

    //some rate control parameters for streaming, taken from ffserver.c
    {
    /* Bitrate tolerance is less for streaming */
    AVCodecContext *av = pCodecCtxEnc;
    //if (av->bit_rate_tolerance == 0)        //ffmpeg sets the default to 4M independent of other parameters, for some reason
        av->bit_rate_tolerance = FFMAX(av->bit_rate / 4,
                  (int64_t)av->bit_rate*av->time_base.num/av->time_base.den);
    //if (av->qmin == 0)
    //    av->qmin = 3;
    //if (av->qmax == 0)
    //    av->qmax = 31;
    //if (av->max_qdiff == 0)
    //    av->max_qdiff = 3;
    //av->qcompress = 0.5;
    //av->qblur = 0.5;

    //if (!av->nsse_weight)
    //    av->nsse_weight = 8;

    //av->frame_skip_cmp = FF_CMP_DCTMAX;
    //if (!av->me_method)
    //    av->me_method = ME_EPZS;
    //av->rc_buffer_aggressivity = 1.0;

    //if (!av->rc_eq)
    //    av->rc_eq = "tex^qComp";
    //if (!av->i_quant_factor)
    //    av->i_quant_factor = -0.8;
    //if (!av->b_quant_factor)
    //    av->b_quant_factor = 1.25;
    //if (!av->b_quant_offset)
    //    av->b_quant_offset = 1.25;
    if (!av->rc_max_rate)
        av->rc_max_rate = av->bit_rate * 1;

    if (av->rc_max_rate && !av->rc_buffer_size) {
        av->rc_buffer_size = av->rc_max_rate;
    }
    }
    //end of code taken from ffserver.c

    switch (pCodecEnc->id) {
    case CODEC_ID_H264 :
        // Fast Profile
        // libx264-fast.ffpreset preset
        pCodecCtxEnc->coder_type = FF_CODER_TYPE_AC; // coder = 1 -> enable CABAC
        pCodecCtxEnc->flags |= CODEC_FLAG_LOOP_FILTER; // flags=+loop -> deblock
        pCodecCtxEnc->me_cmp|= 1; // cmp=+chroma, where CHROMA = 1
        pCodecCtxEnc->partitions |= X264_PART_I8X8|X264_PART_I4X4|X264_PART_P8X8|X264_PART_B8X8; // partitions=+parti8x8+parti4x4+partp8x8+partb8x8
        pCodecCtxEnc->me_method=ME_HEX; // me_method=hex
        pCodecCtxEnc->me_subpel_quality = 6; // subq=6
        pCodecCtxEnc->me_range = 16; // me_range=16
        //pCodecCtxEnc->gop_size = 250; // g=250
        //pCodecCtxEnc->keyint_min = 25; // keyint_min=25
        pCodecCtxEnc->scenechange_threshold = 40; // sc_threshold=40
        pCodecCtxEnc->i_quant_factor = 0.71; // i_qfactor=0.71
        pCodecCtxEnc->b_frame_strategy = 1; // b_strategy=1
        pCodecCtxEnc->qcompress = 0.6; // qcomp=0.6
        pCodecCtxEnc->qmin = 10; // qmin=10
        pCodecCtxEnc->qmax = 51; // qmax=51
        pCodecCtxEnc->max_qdiff = 4; // qdiff=4
        //pCodecCtxEnc->max_b_frames = 3; // bf=3
        pCodecCtxEnc->refs = 2; // refs=2
        //pCodecCtxEnc->directpred = 1; // directpred=1
        pCodecCtxEnc->directpred = 3; // directpred=1 in preset -> "directpred", "direct mv prediction mode - 0 (none), 1 (spatial), 2 (temporal), 3 (auto)"
        //pCodecCtxEnc->trellis = 1; // trellis=1
        pCodecCtxEnc->flags2 |= CODEC_FLAG2_BPYRAMID|CODEC_FLAG2_MIXED_REFS|CODEC_FLAG2_WPRED|CODEC_FLAG2_8X8DCT|CODEC_FLAG2_FASTPSKIP; // flags2=+bpyramid+mixed_refs+wpred+dct8x8+fastpskip
        pCodecCtxEnc->weighted_p_pred = 2; // wpredp=2

        // libx264-main.ffpreset preset
        //pCodecCtxEnc->flags2|=CODEC_FLAG2_8X8DCT;
        //pCodecCtxEnc->flags2^=CODEC_FLAG2_8X8DCT; // flags2=-dct8x8
        //pCodecCtxEnc->crf = 22;

#ifdef STREAMER_X264_USE_SSIM
        pCodecCtxEnc->flags2 |= CODEC_FLAG2_SSIM;
#endif

        //pCodecCtxEnc->weighted_p_pred=2; //maps wpredp=2; weighted prediction analysis method
        // pCodecCtxEnc->rc_min_rate = 0;
        // pCodecCtxEnc->rc_max_rate = video_bitrate*2;
        // pCodecCtxEnc->rc_buffer_size = 0;
        break;
    case CODEC_ID_MPEG4 :
        break;
    default:
        fprintf(stderr, "INIT: Unsupported OUT VIDEO codec: %s!\n", video_codec);
    }

    if ((av_set_options_string(pCodecCtxEnc, codec_options, "=", ",")) < 0) {
        fprintf(stderr, "Error parsing options string: '%s'\n", codec_options);
        return NULL;
    }
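    /* codec_options comes from the -x command line switch and is parsed here as a
     * comma-separated list of key=value pairs, e.g. "me_range=16,qmin=10" (illustrative
     * values); if parsing fails, openVideoEncoder() gives up and returns NULL. */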

    if(avcodec_open(pCodecCtxEnc, pCodecEnc)<0) {
        fprintf(stderr, "INIT: could not open OUT VIDEO codecEnc\n");
        return NULL; // Could not open codec
    }

    return pCodecCtxEnc;
}


int main(int argc, char *argv[]) {
    signal(SIGINT, sigproc);

    int i=0,j,k;

    //output variables
    uint8_t *video_outbuf = NULL;
    int video_outbuf_size, video_frame_size;
    uint8_t *audio_outbuf = NULL;
    int audio_outbuf_size, audio_frame_size;
    int audio_data_size;

    //numeric identifiers of input streams
    int videoStream = -1;
    int audioStream = -1;

//    int len1;
    int frameFinished;
    //frame sequential counters
    int contFrameAudio=1, contFrameVideo=0;
//    int numBytes;

    //command line parameters
    int audio_bitrate = -1;
    int video_bitrate = -1;
    char *audio_codec = "mp2";
    char *video_codec = "mpeg4";
    char *codec_options = "";
    int live_source = 0; //if not live (i.e. a file), sleep before reading the next frame
    int offset_av = 0; //whether to compensate for the offset between audio and video in the file

    //a raw buffer for decoded uncompressed audio samples
    int16_t *samples = NULL;
    //a raw uncompressed video picture
    AVFrame *pFrame1 = NULL;

    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext  *pCodecCtx = NULL ,*aCodecCtxEnc = NULL ,*aCodecCtx = NULL;
    AVCodec         *pCodec = NULL ,*aCodec = NULL ,*aCodecEnc = NULL;
    AVPacket         packet;

    //stuff needed to compute the right timestamps
    short int FirstTimeAudio=1, FirstTimeVideo=1;
    short int pts_anomalies_counter=0;
    short int newtime_anomalies_counter=0;
    long long newTime=0, newTime_audio=0, newTime_video=0, newTime_prev=0;
    struct timeval lastAudioSent = {0, 0};
    int64_t ptsvideo1=0;
    int64_t ptsaudio1=0;
    int64_t last_pkt_dts=0, delta_video=0, delta_audio=0, last_pkt_dts_audio=0, target_pts=0;

    //Napa-Wine specific Frame and Chunk structures for transport
    Frame *frame = NULL;
    ExternalChunk *chunkaudio = NULL;

    char av_input[1024];
    int dest_width = -1;
    int dest_height = -1;

    static struct option long_options[] =
    {
        {"audio_stream", required_argument, 0, 0},
        {"video_stream", required_argument, 0, 0},
        {"avfilter", required_argument, 0, 0},
        {"indexchannel", required_argument, 0, 0},
        {"passthrough", required_argument, 0, 0},
        {"qualitylevels", required_argument, 0, 'Q'},
        {0, 0, 0, 0}
    };
    /* `getopt_long' stores the option index here. */
    int option_index = 0, c;
    int mandatories = 0;
    while ((c = getopt_long (argc, argv, "i:a:v:A:V:s:lop:q:tF:g:b:d:x:Q:", long_options, &option_index)) != -1)
    {
        switch (c) {
            case 0: //for long options
                if( strcmp( "audio_stream", long_options[option_index].name ) == 0 ) { audioStream = atoi(optarg); }
                if( strcmp( "video_stream", long_options[option_index].name ) == 0 ) { videoStream = atoi(optarg); }
                if( strcmp( "avfilter", long_options[option_index].name ) == 0 ) { avfilter = strdup(optarg); }
                if( strcmp( "indexchannel", long_options[option_index].name ) == 0 ) { indexchannel = atoi(optarg); }
                if( strcmp( "passthrough", long_options[option_index].name ) == 0 ) { passthrough = atoi(optarg); }
                break;
            case 'i':
                sprintf(av_input, "%s", optarg);
                mandatories++;
                break;
            case 'a':
                sscanf(optarg, "%d", &audio_bitrate);
                mandatories++;
                break;
            case 'v':
                sscanf(optarg, "%d", &video_bitrate);
                mandatories++;
                break;
            case 'A':
                audio_codec = strdup(optarg);
                break;
            case 'V':
                video_codec = strdup(optarg);
                break;
            case 's':
                sscanf(optarg, "%dx%d", &dest_width, &dest_height);
                break;
            case 'l':
                live_source = 1;
                break;
            case 'o':
                offset_av = 1;
                break;
            case 't':
                ChunkerStreamerTestMode = 1;
                break;
            case 'p':
                sscanf(optarg, "%d", &pts_anomaly_threshold);
                break;
            case 'q':
                sscanf(optarg, "%d", &newtime_anomaly_threshold);
                break;
            case 'F':
                outside_world_url = strdup(optarg);
                break;
            case 'g':
                sscanf(optarg, "%d", &gop_size);
                break;
            case 'b':
                sscanf(optarg, "%d", &max_b_frames);
                break;
            case 'd':
                sscanf(optarg, "%ld", &delay_audio);
                break;
            case 'x':
                codec_options = strdup(optarg);
                break;
            case 'Q':
                sscanf(optarg, "%d", &qualitylevels);
                if (qualitylevels > QUALITYLEVELS_MAX) {
                    fprintf(stderr,"Too many quality levels: %d (max:%d)\n", qualitylevels, QUALITYLEVELS_MAX);
                    return -1;
                }
                break;
            default:
                print_usage(argc, argv);
                return -1;
        }
    }

    if(mandatories < 3)
    {
        print_usage(argc, argv);
        return -1;
    }

#ifdef _WIN32
    {
        WORD wVersionRequested;
        WSADATA wsaData;
        int err;
        wVersionRequested = MAKEWORD(2, 2);
        err = WSAStartup(wVersionRequested, &wsaData);
        if (err != 0) {
            fprintf(stderr, "WSAStartup failed with error: %d\n", err);
            return -2;
        }
    }
#endif

#ifdef YUV_RECORD_ENABLED
    if(ChunkerStreamerTestMode)
    {
        DELETE_DIR("yuv_data");
        CREATE_DIR("yuv_data");
        //FILE* pFile=fopen("yuv_data/streamer_out.yuv", "w");
        //fclose(pFile);
    }
#endif

    // read the configuration file
    cmeta = chunkerInit();
    if (!outside_world_url) {
        outside_world_url = strdup(cmeta->outside_world_url);
    }
    switch(cmeta->strategy)
    {
        case 1:
            chunkFilled = chunkFilledSizeStrategy;
            break;
        default:
            chunkFilled = chunkFilledFramesStrategy;
    }

#ifdef TCPIO
    static char peer_ip[16];
    static int peer_port;
    int res = sscanf(outside_world_url, "tcp://%15[0-9.]:%d", peer_ip, &peer_port);
    if (res < 2) {
        fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
        return -2;
    }

    for (i=0; i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
        outstream[i].output = initTCPPush(peer_ip, peer_port+i);
        if (!outstream[i].output) {
            fprintf(stderr, "Error initializing output module, exiting\n");
            exit(1);
        }
    }
#endif
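    /* Note: with TCPIO each outstream gets its own TCP connection on consecutive
     * ports: the passthrough/quality/index channels use peer_port, peer_port+1, ...
     * in the same order as the outstream[] array. */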

restart:
    if(live_source)
        fprintf(stderr, "INIT: Using LIVE SOURCE TimeStamps\n");
    if(offset_av)
        fprintf(stderr, "INIT: Compensating AV OFFSET in file\n");

    // Register all formats and codecs
    av_register_all();

    // Open input file
    if (!pFormatCtx) {	// do not reopen if it is already open after a restart
        if (av_open_input_file(&pFormatCtx, av_input, NULL, 0, NULL) != 0) {
            fprintf(stderr, "INIT: Couldn't open video file. Exiting.\n");
            exit(-1);
        }

        // Retrieve stream information
        if(av_find_stream_info(pFormatCtx) < 0) {
            fprintf(stderr, "INIT: Couldn't find stream information. Exiting.\n");
            exit(-1);
        }

        // Dump information about file onto standard error
        av_dump_format(pFormatCtx, 0, av_input, 0);

        // Find the video and audio stream numbers
        for(i=0; i<pFormatCtx->nb_streams; i++) {
            if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream<0) {
                videoStream=i;
            }
            if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream<0) {
                audioStream=i;
            }
        }
    }

    if(videoStream==-1 || audioStream==-1) {	// TODO: refine to work with 1 or the other
        fprintf(stderr, "INIT: Didn't find audio and video streams. Exiting.\n");
        exit(-1);
    }

    fprintf(stderr, "INIT: Num streams : %d TBR: %d %d RFRAMERATE:%d %d Duration:%ld\n", pFormatCtx->nb_streams, pFormatCtx->streams[videoStream]->time_base.num, pFormatCtx->streams[videoStream]->time_base.den, pFormatCtx->streams[videoStream]->r_frame_rate.num, pFormatCtx->streams[videoStream]->r_frame_rate.den, (long int)pFormatCtx->streams[videoStream]->duration);

    fprintf(stderr, "INIT: Video stream has id : %d\n",videoStream);
    fprintf(stderr, "INIT: Audio stream has id : %d\n",audioStream);


    // Get a pointer to the codec context for the input video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    //extract W and H
    fprintf(stderr, "INIT: Width:%d Height:%d\n", pCodecCtx->width, pCodecCtx->height);

    // Get a pointer to the codec context for the input audio stream
    if(audioStream != -1) {
        aCodecCtx=pFormatCtx->streams[audioStream]->codec;
        fprintf(stderr, "INIT: AUDIO Codecid: %d channels %d samplerate %d\n", aCodecCtx->codec_id, aCodecCtx->channels, aCodecCtx->sample_rate);
    }

    // Figure out size
    dest_width = (dest_width > 0) ? dest_width : pCodecCtx->width;
    dest_height = (dest_height > 0) ? dest_height : pCodecCtx->height;

    //initialize outstream structures
    for (i=0; i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
        outstream[i].chunk = (ExternalChunk *)malloc(sizeof(ExternalChunk));
        if(!outstream[i].chunk) {
            fprintf(stderr, "INIT: Memory error alloc chunk!!!\n");
            return -1;
        }
        outstream[i].chunk->data = NULL;
        outstream[i].chunk->seq = 0;
        dcprintf(DEBUG_CHUNKER, "INIT: chunk video %d\n", outstream[i].chunk->seq);
        outstream[i].pCodecCtxEnc = NULL;
    }
    if (passthrough) outstream[0].pCodecCtxEnc = NULL;
    for (i=(passthrough?1:0),j=1,k=1; i < (passthrough?1:0) + qualitylevels; i++) {
        outstream[i].pCodecCtxEnc = openVideoEncoder(video_codec, video_bitrate/j, (dest_width/k/2)*2, (dest_height/k/2)*2, pCodecCtx->time_base, codec_options);	// (w/2)*2, since libx264 requires width,height to be even
        if (!outstream[i].pCodecCtxEnc) {
            return -1;
        }
        j*=3;	//reduce bitrate to 1/3
        k*=2;	//reduce dimensions to 1/2
    }
    if (indexchannel) {
        outstream[(passthrough?1:0) + qualitylevels].pCodecCtxEnc = openVideoEncoder(video_codec, 50000, 160, 120, pCodecCtx->time_base, codec_options);
        if (!outstream[(passthrough?1:0) + qualitylevels].pCodecCtxEnc) {
            return -1;
        }
    }
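    /* Example of the scaling above (illustrative numbers, not from any config):
     * with video_bitrate=1200k, a 640x480 input and qualitylevels=3, the levels are
     * 1200k @ 640x480, 400k @ 320x240 and about 133k @ 160x120, since the bitrate is
     * divided by 3 and each dimension halved (rounded down to even) at every level;
     * the optional index channel is always encoded at 50 kbit/s and 160x120. */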

    //fprintf(stderr, "INIT: VIDEO timebase OUT:%d %d IN: %d %d\n", outstream[1].pCodecCtxEnc->time_base.num, outstream[1].pCodecCtxEnc->time_base.den, pCodecCtx->time_base.num, pCodecCtx->time_base.den);

    if(pCodec==NULL) {
        fprintf(stderr, "INIT: Unsupported IN VIDEO pcodec!\n");
        return -1; // Codec not found
    }
    if(avcodec_open(pCodecCtx, pCodec)<0) {
        fprintf(stderr, "INIT: could not open IN VIDEO codec\n");
        return -1; // Could not open codec
    }
    if(audioStream!=-1) {
        //setup audio output encoder
        aCodecCtxEnc = avcodec_alloc_context();
        aCodecCtxEnc->bit_rate = audio_bitrate; //256000
        aCodecCtxEnc->sample_fmt = SAMPLE_FMT_S16;
        aCodecCtxEnc->sample_rate = aCodecCtx->sample_rate;
        aCodecCtxEnc->channels = aCodecCtx->channels;
        fprintf(stderr, "INIT: AUDIO bitrate OUT:%d sample_rate:%d channels:%d\n", aCodecCtxEnc->bit_rate, aCodecCtxEnc->sample_rate, aCodecCtxEnc->channels);

        // Find the decoder for the audio stream
        aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
        aCodecEnc = avcodec_find_encoder_by_name(audio_codec);
        if(aCodec==NULL) {
            fprintf(stderr,"INIT: Unsupported acodec!\n");
            return -1;
        }
        if(aCodecEnc==NULL) {
            fprintf(stderr,"INIT: Unsupported acodecEnc!\n");
            return -1;
        }

        if(avcodec_open(aCodecCtx, aCodec)<0) {
            fprintf(stderr, "INIT: could not open IN AUDIO codec\n");
            return -1; // Could not open codec
        }
        if(avcodec_open(aCodecCtxEnc, aCodecEnc)<0) {
            fprintf(stderr, "INIT: could not open OUT AUDIO codec\n");
            return -1; // Could not open codec
        }
    }
    else {
        fprintf(stderr,"INIT: NO AUDIO TRACK IN INPUT FILE\n");
    }

    // Allocate audio in and out buffers
    samples = (int16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
    if(samples == NULL) {
        fprintf(stderr, "INIT: Memory error alloc audio samples!!!\n");
        return -1;
    }
    audio_outbuf_size = STREAMER_MAX_AUDIO_BUFFER_SIZE;
    audio_outbuf = av_malloc(audio_outbuf_size);
    if(audio_outbuf == NULL) {
        fprintf(stderr, "INIT: Memory error alloc audio_outbuf!!!\n");
        return -1;
    }

    // Allocate video in frame and out buffer
    pFrame1=avcodec_alloc_frame();
    if(pFrame1==NULL) {
        fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
        return -1;
    }
    video_outbuf_size = STREAMER_MAX_VIDEO_BUFFER_SIZE;
    video_outbuf = av_malloc(video_outbuf_size);

    //allocate Napa-Wine transport
    frame = (Frame *)malloc(sizeof(Frame));
    if(!frame) {
        fprintf(stderr, "INIT: Memory error alloc Frame!!!\n");
        return -1;
    }

    //create empty first audio chunk

    chunkaudio = (ExternalChunk *)malloc(sizeof(ExternalChunk));
    if(!chunkaudio) {
        fprintf(stderr, "INIT: Memory error alloc chunkaudio!!!\n");
        return -1;
    }
    chunkaudio->data=NULL;
    chunkaudio->seq = 0;
    //initChunk(chunkaudio, &seq_current_chunk);
    dcprintf(DEBUG_CHUNKER, "INIT: chunk audio %d\n", chunkaudio->seq);

#ifdef HTTPIO
    /* initialize the HTTP chunk pusher */
    initChunkPusher(); //TRIPLO
#endif

    long sleep=0;
    struct timeval now_tv;
    struct timeval tmp_tv;
    long long lateTime = 0;
    long long maxAudioInterval = 0;
    long long maxVDecodeTime = 0;
//    unsigned char lastIFrameDistance = 0;

#ifdef UDPIO
    static char peer_ip[16];
    static int peer_port;
    int res = sscanf(outside_world_url, "udp://%15[0-9.]:%d", peer_ip, &peer_port);
    if (res < 2) {
        fprintf(stderr,"error parsing output url: %s\n", outside_world_url);
        return -2;
    }

    initUDPPush(peer_ip, peer_port);
#endif

    char videotrace_filename[255];
    char psnr_filename[255];
    sprintf(videotrace_filename, "yuv_data/videotrace.log");
    sprintf(psnr_filename, "yuv_data/psnrtrace.log");
    FILE* videotrace = fopen(videotrace_filename, "w");
    FILE* psnrtrace = fopen(psnr_filename, "w");

#ifdef USE_AVFILTER
    //init AVFilter
    avfilter_register_all();
    init_filters(avfilter, pCodecCtx);
#endif

    //main loop to read from the input file
    while((av_read_frame(pFormatCtx, &packet)>=0) && !quit)
    {
        //detect if a strange number of anomalies is occurring
        if(ptsvideo1 < 0 || ptsvideo1 > packet.dts || ptsaudio1 < 0 || ptsaudio1 > packet.dts) {
            pts_anomalies_counter++;
            dctprintf(DEBUG_ANOMALIES, "READLOOP: pts BASE anomaly detected number %d (a:%"PRId64" v:%"PRId64" dts:%"PRId64")\n", pts_anomalies_counter, ptsaudio1, ptsvideo1, packet.dts);
            if(pts_anomaly_threshold >=0 && live_source) { //reset just in case of live source
                if(pts_anomalies_counter > pts_anomaly_threshold) {
                    dctprintf(DEBUG_ANOMALIES, "READLOOP: too many pts BASE anomalies. resetting pts base\n");
                    av_free_packet(&packet);
                    goto close;
                }
            }
        }
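        /* ptsvideo1/ptsaudio1 hold the pts base offsets taken from the first video/audio
         * frames (see "SET PTS BASE OFFSET" below); a packet dts smaller than that base
         * is counted as an anomaly, which typically happens when a live source resets
         * its timestamps. */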

        //newTime_video and _audio are in usec
        //if video and audio stamps differ more than 5sec
        if( newTime_video - newTime_audio > 5000000 || newTime_video - newTime_audio < -5000000 ) {
            newtime_anomalies_counter++;
            dctprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME audio video differ anomaly detected number %d (a:%lld, v:%lld)\n", newtime_anomalies_counter, newTime_audio, newTime_video);
        }

        if(newtime_anomaly_threshold >=0 && newtime_anomalies_counter > newtime_anomaly_threshold) {
            if(live_source) { //restart just in case of live source
                dctprintf(DEBUG_ANOMALIES, "READLOOP: too many NEGATIVE TIMESTAMPS anomalies. Restarting.\n");
                av_free_packet(&packet);
                goto close;
            }
        }

        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            if(!live_source)
            {
                if(audioStream != -1) { //take this "time bank" method into account only if we have audio track
                    // lateTime < 0 means a positive time account that can be used to decode video frames
                    // if (lateTime + maxVDecodeTime) >= 0 then we may have a negative time account after video transcoding
                    // therefore, it's better to skip the frame
                    if(timebank && (lateTime+maxVDecodeTime) >= 0)
                    {
                        dcprintf(DEBUG_ANOMALIES, "\n\n\t\t************************* SKIPPING VIDEO FRAME %ld ***********************************\n\n", sleep);
                        av_free_packet(&packet);
                        continue;
                    }
                }
            }

            gettimeofday(&tmp_tv, NULL);

            //decode the video packet into a raw pFrame

            if(avcodec_decode_video2(pCodecCtx, pFrame1, &frameFinished, &packet)>0)
            {
                AVFrame *pFrame;
                pFrame = pFrame1;

                // usleep(5000);
                dctprintf(DEBUG_VIDEO_FRAMES, "VIDEOin pkt: dts %"PRId64" pts %"PRId64" pts-dts %"PRId64"\n", packet.dts, packet.pts, packet.pts-packet.dts );
                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame->pkt_dts, pFrame->pkt_pts, pFrame->pts);
                dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame->pict_type, pFrame->key_frame ? " (key)" : "");

                if(frameFinished)
                { // it must be true all the time else error
                    AVFrame *pFrame2 = NULL;

                    frame->number = ++contFrameVideo;



                    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: finished frame %d dts %"PRId64" pts %"PRId64"\n", frame->number, packet.dts, packet.pts);
                    if(frame->number==0) {
                        if(packet.dts==AV_NOPTS_VALUE)
                        {
                            //a Dts with a noPts value is troublesome case for delta calculation based on Dts
                            contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
                            av_free_packet(&packet);
                            continue;
                        }
                        last_pkt_dts = packet.dts;
                        newTime = 0;
                    }
                    else {
                        if(packet.dts!=AV_NOPTS_VALUE) {
                            delta_video = packet.dts-last_pkt_dts;
                            last_pkt_dts = packet.dts;
                        }
                        else if(delta_video==0)
                        {
                            //a Dts with a noPts value is troublesome case for delta calculation based on Dts
                            contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
                            av_free_packet(&packet);
                            continue;
                        }
                    }
                    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: deltavideo : %d\n", (int)delta_video);

                    //set initial timestamp
                    if(FirstTimeVideo && pFrame->pkt_pts>0) {
                        if(offset_av) {
                            ptsvideo1 = pFrame->pkt_pts;
                            FirstTimeVideo = 0;
                            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: SET PTS BASE OFFSET %"PRId64"\n", ptsvideo1);
                        } else { //we want to compensate audio and video offset for this source
                            //maintain the offset between audio pts and video pts
                            //because in case of live source they have the same numbering
                            if(ptsaudio1 > 0) //if we have already seen some audio frames...
                                ptsvideo1 = ptsaudio1;
                            else
                                ptsvideo1 = pFrame->pkt_pts;
                            FirstTimeVideo = 0;
                            dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO LIVE: SET PTS BASE OFFSET %"PRId64"\n", ptsvideo1);
                        }
                    }

                    // store timestamp in useconds for next frame sleep
                    if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
                        newTime_video = pts2ms(pFrame->pkt_pts - ptsvideo1, pFormatCtx->streams[videoStream]->time_base)*1000;
                    } else {
                        newTime_video = pts2ms(pFrame->pkt_dts - ptsvideo1, pFormatCtx->streams[videoStream]->time_base)*1000;	//TODO: a better estimate is needed
                    }
                    dcprintf(DEBUG_VIDEO_FRAMES, "Setting v:%lld\n", newTime_video);

                    if(passthrough) {	//copy channel
                        video_frame_size = packet.size;
                        if (video_frame_size > video_outbuf_size) {
                            fprintf(stderr, "VIDEO: error, outbuf too small, SKIPPING\n");
                            av_free_packet(&packet);
                            continue;
                        } else {
                            memcpy(video_outbuf, packet.data, video_frame_size);
                        }

                        if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
                            target_pts = pFrame->pkt_pts;
                        } else {	//TODO: review this
                            target_pts = pFrame->pkt_dts;
                        }
                        createFrame(frame, pts2ms(target_pts - ptsvideo1, pFormatCtx->streams[videoStream]->time_base), video_frame_size,
                            pFrame->pict_type);
                        addFrameToOutstream(&outstream[0], frame, video_outbuf);
                    }

                    if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
                        pFrame->pts = av_rescale_q(pFrame->pkt_pts, pFormatCtx->streams[videoStream]->time_base, outstream[(passthrough?1:0)].pCodecCtxEnc->time_base);
                    } else {	//try to figure out the pts //TODO: review this
                        if (pFrame->pkt_dts != AV_NOPTS_VALUE) {
                            pFrame->pts = av_rescale_q(pFrame->pkt_dts, pFormatCtx->streams[videoStream]->time_base, outstream[(passthrough?1:0)].pCodecCtxEnc->time_base);
                        }
                    }
1087

    
1088
                                        pFrame2 = preprocessFrame(pFrame);
1089
                                        if (!pFrame2) {
1090
                                                dcprintf(DEBUG_VIDEO_FRAMES, "no otput from preprocessing Frame\n");
1091
                                                av_free_packet(&packet);
1092
                                                continue;
1093
                                        } else if (pFrame2 == pFrame) {        // handle the case of NOP preprocess
1094
                                                pFrame = pFrame2;
1095
                                                pFrame2 = NULL;
1096
                                        } else {
1097
                                                pFrame = pFrame2;
1098
                                        }
1099

    
1100
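					/* re-encode the decoded frame once per configured quality level (plus the
					   optional index channel); outstream[0] is reserved for passthrough when enabled */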
					for (i=(passthrough?1:0); i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
						video_frame_size = transcodeFrame(video_outbuf, video_outbuf_size, &target_pts, pFrame, pFormatCtx->streams[videoStream]->time_base, pCodecCtx, outstream[i].pCodecCtxEnc);
						if (video_frame_size <= 0) {
							av_free_packet(&packet);
							contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
							continue;	//TODO: this seems wrong, continuing the internal cycle
						}
						createFrame(frame, pts2ms(target_pts - ptsvideo1, pFormatCtx->streams[videoStream]->time_base), video_frame_size,
						    (unsigned char)outstream[i].pCodecCtxEnc->coded_frame->pict_type);
						addFrameToOutstream(&outstream[i], frame, video_outbuf);
					}


					//compute how long it took to encode video frame
					gettimeofday(&now_tv, NULL);
					long long usec = (now_tv.tv_sec-tmp_tv.tv_sec)*1000000;
					usec+=(now_tv.tv_usec-tmp_tv.tv_usec);
					if(usec > maxVDecodeTime)
						maxVDecodeTime = usec;

					//we DONT have an audio track, so we compute timings and determine
					//how much time we have to sleep at next VIDEO frame taking
					//also into account how much time was needed to encode the current
					//video frame
					//all this in case the video source is not live, i.e. not self-timing
					//and only in case there is no audio track
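					/* roughly: sleep = (newTime_video - newTime_prev) - usec, i.e. the ideal
					   inter-frame gap minus the time just spent encoding this frame */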
					if(audioStream == -1) {
						if(!live_source) {
							if(newTime_prev != 0) {
								//how much delay between video frames ideally
								long long maxDelay = newTime_video - newTime_prev;
								sleep = (maxDelay - usec);
								dcprintf(DEBUG_ANOMALIES,"\tmaxDelay=%ld\n", ((long)maxDelay));
								dcprintf(DEBUG_ANOMALIES,"\tlast video frame interval=%ld; sleep time=%ld\n", ((long)usec), ((long)sleep));
							}
							else
								sleep = 0;

							//update and store counters
							newTime_prev = newTime_video;

							//i can also sleep now instead of at the beginning of
							//the next frame because in this case we only have video
							//frames, hence it would immediately be the next thing to do
							if(sleep > 0) {
								dcprintf(DEBUG_TIMESTAMPING,"\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
								usleep(sleep);
							}

						}
					}
					if(pFrame2) av_free(pFrame2);
				}
			}
		} else if(packet.stream_index==audioStream) {
			if(sleep > 0)
			{
				dcprintf(DEBUG_TIMESTAMPING, "\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
				usleep(sleep);
			}

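			/* note: with the old avcodec_decode_audio3() API, audio_data_size is in/out:
			   on input the size of the 'samples' buffer, on output the decoded byte count */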
			audio_data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
			//decode the audio packet into a raw audio source buffer
			if(avcodec_decode_audio3(aCodecCtx, samples, &audio_data_size, &packet)>0)
			{
				dcprintf(DEBUG_AUDIO_FRAMES, "\n-------AUDIO FRAME\n");
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: newTimeaudioSTART : %lf\n", (double)(packet.pts)*av_q2d(pFormatCtx->streams[audioStream]->time_base));
				if(audio_data_size>0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: datasizeaudio:%d\n", audio_data_size);
					/* if a frame has been decoded, output it */
					//fwrite(samples, 1, audio_data_size, outfileaudio);
				}
				else {
					av_free_packet(&packet);
					continue;
				}

				audio_frame_size = avcodec_encode_audio(aCodecCtxEnc, audio_outbuf, audio_data_size, samples);
				if(audio_frame_size <= 0) {
					av_free_packet(&packet);
					continue;
				}

				frame->number = contFrameAudio;

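				/* first audio frame: remember its dts as the base; later frames track the
				   dts delta and skip packets that carry no usable timestamp */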
				if(frame->number==0) {
					if(packet.dts==AV_NOPTS_VALUE) {
						av_free_packet(&packet);
						continue;
					}
					last_pkt_dts_audio = packet.dts;
					newTime = 0;
				}
				else {
					if(packet.dts!=AV_NOPTS_VALUE) {
						delta_audio = packet.dts-last_pkt_dts_audio;
						last_pkt_dts_audio = packet.dts;
					}
					else if(delta_audio==0) {
						av_free_packet(&packet);
						continue;
					}
				}
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: original codec frame number %d vs. encoded %d vs. packed %d\n", aCodecCtx->frame_number, aCodecCtxEnc->frame_number, frame->number);
				//use pts if dts is invalid
				if(packet.dts!=AV_NOPTS_VALUE)
					target_pts = packet.dts;
				else if(packet.pts!=AV_NOPTS_VALUE) {
					target_pts = packet.pts;
				} else {
					av_free_packet(&packet);
					continue;
				}

				if(offset_av)
				{
					if(FirstTimeAudio && packet.dts>0) {
						ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SET PTS BASE OFFSET %"PRId64"\n", ptsaudio1);
					}
				}
				else //we want to compensate audio and video offset for this source
				{
					if(FirstTimeAudio && packet.dts>0) {
						//maintain the offset between audio pts and video pts
						//because in case of live source they have the same numbering
						if(ptsvideo1 > 0) //if we have already seen some video frames...
							ptsaudio1 = ptsvideo1;
						else
							ptsaudio1 = packet.dts;
						FirstTimeAudio = 0;
						dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO LIVE: SET PTS BASE OFFSET %"PRId64"\n", ptsaudio1);
					}
				}
				//compute the new audio timestamps in milliseconds
				if(frame->number>0) {
					newTime = ((target_pts-ptsaudio1)*1000.0*((double)av_q2d(pFormatCtx->streams[audioStream]->time_base)));//*(double)delta_audio;
					// store timestamp in useconds for next frame sleep
					newTime_audio = newTime*1000;
				}
				dcprintf(DEBUG_TIMESTAMPING, "AUDIO: NEWTIMESTAMP %lld\n", newTime);
				if(newTime<0) {
					dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: SKIPPING FRAME\n");
					newtime_anomalies_counter++;
					dctprintf(DEBUG_ANOMALIES, "READLOOP: NEWTIME negative audio timestamp anomaly detected number %d (a:%lld)\n", newtime_anomalies_counter, newTime*1000);
					av_free_packet(&packet);
					continue; //SKIP THIS FRAME, bad timestamp
				}

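				/* note: newTime is in milliseconds; the timestamp is stored with the
				   millisecond value split as tv_sec = ms/1000 and tv_usec = ms%1000
				   (tv_usec holds the leftover milliseconds, not microseconds) */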
				frame->timestamp.tv_sec = (unsigned int)(newTime + delay_audio)/1000;
				frame->timestamp.tv_usec = (newTime + delay_audio)%1000;
				frame->size = audio_frame_size;
				frame->type = 5; // 5 is audio type
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: pts %"PRId64" duration %d timebase %d %d dts %"PRId64"\n", packet.pts, packet.duration, pFormatCtx->streams[audioStream]->time_base.num, pFormatCtx->streams[audioStream]->time_base.den, packet.dts);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: timestamp sec:%ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
				dcprintf(DEBUG_AUDIO_FRAMES, "AUDIO: deltaaudio %"PRId64"\n", delta_audio);
				contFrameAudio++;

				if(update_chunk(chunkaudio, frame, audio_outbuf) == -1) {
					fprintf(stderr, "AUDIO: unable to update chunk %d. Exiting.\n", chunkaudio->seq);
					exit(-1);
				}
				//set priority
				chunkaudio->priority = 1;

				if(chunkFilled(chunkaudio, AUDIO_CHUNK)) {
					// is chunk filled using current strategy?
					//SAVE ON FILE
					//saveChunkOnFile(chunkaudio);
					//Send the chunk to an external transport/player
					for (i=0; i < (passthrough?1:0) + qualitylevels; i++) {	//do not send audio to the index channel
						sendChunk(outstream[i].output, chunkaudio);
					}
					dctprintf(DEBUG_CHUNKER, "AUDIO: just sent chunk audio %d\n", chunkaudio->seq);
					chunkaudio->seq = 0; //signal that we need an increase
					//initChunk(chunkaudio, &seq_current_chunk);
				}

				//we have an audio track, so we compute timings and determine
				//how much time we have to sleep at next audio frame taking
				//also into account how much time was needed to encode the
				//video frames
				//all this in case the video source is not live, i.e. not self-timing
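				/* pacing sketch: lateTime accumulates (ideal audio gap - measured gap);
				   when lateTime + maxAudioInterval drops below zero we sleep by that amount
				   so we do not push audio faster than real time */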
				if(!live_source)
				{
					if(newTime_prev != 0)
					{
						long long maxDelay = newTime_audio - newTime_prev;

						gettimeofday(&now_tv, NULL);
						long long usec = (now_tv.tv_sec-lastAudioSent.tv_sec)*1000000;
						usec+=(now_tv.tv_usec-lastAudioSent.tv_usec);

						if(usec > maxAudioInterval)
							maxAudioInterval = usec;

						lateTime -= (maxDelay - usec);
						dcprintf(DEBUG_TIMESTAMPING,"\tmaxDelay=%ld, maxAudioInterval=%ld\n", ((long)maxDelay), ((long) maxAudioInterval));
						dcprintf(DEBUG_TIMESTAMPING,"\tlast audio frame interval=%ld; lateTime=%ld\n", ((long)usec), ((long)lateTime));

						if((lateTime+maxAudioInterval) < 0)
							sleep = (lateTime+maxAudioInterval)*-1;
						else
							sleep = 0;
					}
					else
						sleep = 0;

					newTime_prev = newTime_audio;
					gettimeofday(&lastAudioSent, NULL);
				}
			}
		}
		dcprintf(DEBUG_CHUNKER,"Free the packet that was allocated by av_read_frame\n");
		av_free_packet(&packet);
	}

	if(videotrace)
		fclose(videotrace);
	if(psnrtrace)
		fclose(psnrtrace);

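/* teardown: flush any partially filled chunks, then release buffers, codecs and outputs */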
close:
	for (i=0; i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
		if(outstream[i].chunk->seq != 0 && outstream[i].chunk->frames_num>0) {
			sendChunk(outstream[i].output, outstream[i].chunk);
			dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST VIDEO CHUNK\n");
			outstream[i].chunk->seq = 0; //signal that we need an increase just in case we will restart
		}
	}
	for (i=0; i < (passthrough?1:0) + qualitylevels; i++) {
		if(chunkaudio->seq != 0 && chunkaudio->frames_num>0) {
			sendChunk(outstream[i].output, chunkaudio);
			dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST AUDIO CHUNK\n");
		}
	}
	chunkaudio->seq = 0; //signal that we need an increase just in case we will restart

#ifdef HTTPIO
	/* finalize the HTTP chunk pusher */
	finalizeChunkPusher();
#endif

	for (i=0; i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
		free(outstream[i].chunk);
	}
	free(chunkaudio);
	free(frame);
	av_free(video_outbuf);
	av_free(audio_outbuf);

	// Free the YUV frame
	av_free(pFrame1);
	av_free(samples);

	// Close the codec
	avcodec_close(pCodecCtx);
	for (i=(passthrough?1:0); i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
		avcodec_close(outstream[i].pCodecCtxEnc);
	}
#ifdef USE_AVFILTER
	close_filters();
#endif

	if(audioStream!=-1) {
		avcodec_close(aCodecCtx);
		avcodec_close(aCodecCtxEnc);
	}

	// Close the video file
	if (strcmp(av_input, "/dev/stdin") != 0) {	//TODO: implement better check for stdin
		av_close_input_file(pFormatCtx);
		pFormatCtx = NULL;
	}

	if(LOOP_MODE) {
		//we want video to continue, but the av_read_frame stopped
		//lets wait a 5 secs, and cycle in again
		usleep(5000000);
		dprintf("CHUNKER: WAITING 5 secs FOR LIVE SOURCE TO SKIP ERRORS AND RESTARTING\n");
		//videoStream = -1;	//we assume this remains the same (also needed when set explicitly)
		//audioStream = -1;	//we assume this remains the same (also needed when set explicitly)
		FirstTimeAudio=1;
		FirstTimeVideo=1;
		pts_anomalies_counter=0;
		newtime_anomalies_counter=0;
		newTime=0;
		newTime_audio=0;
		newTime_prev=0;
		ptsvideo1=0;
		ptsaudio1=0;
		last_pkt_dts=0;
		delta_video=0;
		delta_audio=0;
		last_pkt_dts_audio=0;
		target_pts=0;
		i=0;
		//~ contFrameVideo = 0;
		//~ contFrameAudio = 1;

#ifdef YUV_RECORD_ENABLED
		if(ChunkerStreamerTestMode)
		{
			video_record_count++;
			//~ savedVideoFrames = 0;

			//~ char tmp_filename[255];
			//~ sprintf(tmp_filename, "yuv_data/out_%d.yuv", video_record_count);
			//~ FILE *pFile=fopen(tmp_filename, "w");
			//~ if(pFile!=NULL)
				//~ fclose(pFile);
		}
#endif

		goto restart;
	}

	free(cmeta);

#ifdef TCPIO
	for (i=0; i < (passthrough?1:0) + qualitylevels + (indexchannel?1:0); i++) {
		finalizeTCPChunkPusher(outstream[i].output);
	}
#endif

	return 0;
}

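/* update_chunk(): appends one encoded frame to the chunk payload.
 * Each frame is preceded by a 5 x 32-bit header pushed in network byte order via
 * bit32_encoded_push(): number, timestamp.tv_sec, timestamp.tv_usec, size, type.
 */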
int update_chunk(ExternalChunk *chunk, Frame *frame, uint8_t *outbuf) {
	//the Frame (see frame.h) gets encoded into 5 slots of 32 bits (3 ints plus 2 more for the timeval struct)
	static int sizeFrameHeader = 5*sizeof(int32_t);

	//moving temp pointer to encode Frame on the wire
	uint8_t *tempdata = NULL;

	if(chunk->seq == 0) {
		initChunk(chunk, &seq_current_chunk);
	}
	//add frame priority to chunk priority (to be normalized later on)
	chunk->priority += frame->type + 1; // I:2, P:3, B:4

	//HINT on malloc
	chunk->data = (uint8_t *)realloc(chunk->data, sizeof(uint8_t)*(chunk->payload_len + frame->size + sizeFrameHeader));
	if(!chunk->data) {
		fprintf(stderr, "Memory error in chunk!!!\n");
		return -1;
	}
	chunk->frames_num++; // number of frames in the current chunk

/*
	//package the Frame header
	tempdata = chunk->data+chunk->payload_len;
	*((int32_t *)tempdata) = frame->number;
	tempdata+=sizeof(int32_t);
	*((struct timeval *)tempdata) = frame->timestamp;
	tempdata+=sizeof(struct timeval);
	*((int32_t *)tempdata) = frame->size;
	tempdata+=sizeof(int32_t);
	*((int32_t *)tempdata) = frame->type;
	tempdata+=sizeof(int32_t);
*/
	//package the Frame header: network order and platform independent
	tempdata = chunk->data+chunk->payload_len;
	bit32_encoded_push(frame->number, tempdata);
	bit32_encoded_push(frame->timestamp.tv_sec, tempdata + CHUNK_TRANSCODING_INT_SIZE);
	bit32_encoded_push(frame->timestamp.tv_usec, tempdata + CHUNK_TRANSCODING_INT_SIZE*2);
	bit32_encoded_push(frame->size, tempdata + CHUNK_TRANSCODING_INT_SIZE*3);
	bit32_encoded_push(frame->type, tempdata + CHUNK_TRANSCODING_INT_SIZE*4);

	//insert the new frame data
	memcpy(chunk->data + chunk->payload_len + sizeFrameHeader, outbuf, frame->size);
	chunk->payload_len += frame->size + sizeFrameHeader; // update payload length
	//chunk length is updated just prior to pushing it out because
	//the chunk header len is better calculated there
	//chunk->len = sizeChunkHeader + chunk->payload_len; // update overall length

	//update timestamps
	if(((int)frame->timestamp.tv_sec < (int)chunk->start_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->start_time.tv_sec && (int)frame->timestamp.tv_usec < (int)chunk->start_time.tv_usec) || (int)chunk->start_time.tv_sec==-1) {
		chunk->start_time.tv_sec = frame->timestamp.tv_sec;
		chunk->start_time.tv_usec = frame->timestamp.tv_usec;
	}

	if(((int)frame->timestamp.tv_sec > (int)chunk->end_time.tv_sec) || ((int)frame->timestamp.tv_sec==(int)chunk->end_time.tv_sec && (int)frame->timestamp.tv_usec > (int)chunk->end_time.tv_usec) || (int)chunk->end_time.tv_sec==-1) {
		chunk->end_time.tv_sec = frame->timestamp.tv_sec;
		chunk->end_time.tv_usec = frame->timestamp.tv_usec;
	}
	return 0;
}

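/* SaveFrame(): appends one raw planar YUV 4:2:0 picture to yuv_data/streamer_out.yuv
 * (full-resolution Y plane, then U and V planes at half width and half height).
 */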
void SaveFrame(AVFrame *pFrame, int width, int height)
{
	FILE *pFile;
	int  y;

	// Open file
	char tmp_filename[255];
	sprintf(tmp_filename, "yuv_data/streamer_out.yuv");
	pFile=fopen(tmp_filename, "ab");
	if(pFile==NULL)
		return;

	// Write header
	//fprintf(pFile, "P5\n%d %d\n255\n", width, height);

	// Write Y data
	for(y=0; y<height; y++)
		if(fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width, pFile) != width)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write U data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[1]+y*pFrame->linesize[1], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}
	// Write V data
	for(y=0; y<height/2; y++)
		if(fwrite(pFrame->data[2]+y*pFrame->linesize[2], 1, width/2, pFile) != width/2)
		{
			printf("errno = %d\n", errno);
			exit(1);
		}

	// Close file
	fclose(pFile);
}

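/* SaveEncodedFrame(): debug dump of the Frame struct followed by the encoded payload.
 * Note the struct is written verbatim, so the file layout is platform dependent;
 * presumably intended only for local inspection.
 */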
void SaveEncodedFrame(Frame* frame, uint8_t *video_outbuf)
{
	static FILE* pFile = NULL;

	pFile=fopen("yuv_data/streamer_out.mpeg4", "ab");
	if(pFile==NULL)
		return;
	fwrite(frame, sizeof(Frame), 1, pFile);
	fwrite(video_outbuf, frame->size, 1, pFile);
	fclose(pFile);
}