Revision daaa68ea chunker_streamer/chunker_streamer.c

View differences:

chunker_streamer/chunker_streamer.c
20 20

  
21 21
#include "chunk_pusher.h"
22 22

  
23
/* Bundles the transport handle together with the video chunk currently
 * being filled, so both travel as one unit (replaces the old bare
 * `static struct output* output;` file-scope global). */
struct outstream {
	struct output *output;	/* transport handle, set from initTCPPush() in main() */
	ExternalChunk *chunk;	/* video chunk being accumulated; seq==0 signals "needs a new sequence number" */
};
/* Single global video outstream used by main() and addFrameToOutstream(). */
struct outstream outstream;
24 28

  
25 29
#define DEBUG
26 30
#define DEBUG_AUDIO_FRAMES  false
......
162 166
#endif
163 167
}
164 168

  
169

  
170
/*
 * Re-encode one decoded video frame with the output encoder.
 *
 * video_outbuf/video_outbuf_size: caller-owned buffer receiving the
 *   encoded bitstream.
 * target_pts: out-parameter; on success receives the encoded frame's pts
 *   rescaled from the encoder time base back into `time_base`.
 * pFrame: decoded input frame (its pts is rewritten here from
 *   pkt_pts/pkt_dts rescaled into the encoder time base).
 * time_base: container (stream) time base of the input.
 * pCodecCtx/pCodecCtxEnc: decoder and encoder contexts; if their
 *   dimensions differ the frame is rescaled with swscale first.
 *
 * Returns the encoded frame size in bytes (may be 0 if the encoder
 * buffered the frame), or -1 on error.
 *
 * BUGFIX vs. previous revision: the `!video_outbuf || !scaledFrame_buffer`
 * error path leaked pFrame2, scaledFrame and scaledFrame_buffer, and
 * avpicture_fill() was called before scaledFrame_buffer was checked for
 * NULL. All exit paths now funnel through a single cleanup label
 * (av_free(NULL) is a no-op, so no guards are needed).
 */
int transcodeFrame(uint8_t *video_outbuf, int video_outbuf_size, int64_t *target_pts, AVFrame *pFrame, AVRational time_base, AVCodecContext *pCodecCtx, AVCodecContext *pCodecCtxEnc)
{
	int video_frame_size = -1;
	AVFrame *pFrame2 = NULL;
	AVFrame *scaledFrame = NULL;
	uint8_t *scaledFrame_buffer = NULL;
	int scaledFrame_buf_size;

	pFrame2 = avcodec_alloc_frame();
	scaledFrame = avcodec_alloc_frame();
	if(pFrame2==NULL || scaledFrame==NULL) {
		fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
		goto cleanup;
	}

	scaledFrame_buf_size = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);
	scaledFrame_buffer = (uint8_t *) av_malloc(scaledFrame_buf_size * sizeof(uint8_t));
	if(!video_outbuf || !scaledFrame_buffer) {
		fprintf(stderr, "INIT: Memory error alloc video_outbuf!!!\n");
		goto cleanup;	/* previously leaked all three allocations here */
	}
	/* only wire the picture planes up once the buffer is known-good */
	avpicture_fill((AVPicture*) scaledFrame, scaledFrame_buffer, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height);

	/* rescale the input pts into the encoder time base; fall back to dts
	 * when pts is missing */
	if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
		pFrame->pts = av_rescale_q(pFrame->pkt_pts, time_base, pCodecCtxEnc->time_base);
	} else if (pFrame->pkt_dts != AV_NOPTS_VALUE) {	//try to figure out the pts //TODO: review this
		pFrame->pts = av_rescale_q(pFrame->pkt_dts, time_base, pCodecCtxEnc->time_base);
	}

#ifdef VIDEO_DEINTERLACE
	avpicture_deinterlace(
			(AVPicture*) pFrame,
			(const AVPicture*) pFrame,
			pCodecCtxEnc->pix_fmt,
			pCodecCtxEnc->width,
			pCodecCtxEnc->height);
#endif

#ifdef USE_AVFILTER
	//apply avfilters
	filter(pFrame,pFrame2);
	pFrame = pFrame2;
	dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
	dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
#endif

	if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
		/* dimensions differ: scale into scaledFrame before encoding;
		 * the sws context is created lazily and cached across calls */
		static struct SwsContext *img_convert_ctx = NULL;

		pFrame->pict_type = 0;	/* let the encoder pick the picture type */
		if(img_convert_ctx == NULL)
		{
			img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, pCodecCtxEnc->width, pCodecCtxEnc->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
			if(img_convert_ctx == NULL) {
				fprintf(stderr, "Cannot initialize the conversion context!\n");
				exit(1);
			}
		}
		sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, scaledFrame->data, scaledFrame->linesize);
		scaledFrame->pts = pFrame->pts;
		scaledFrame->pict_type = 0;
		video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, scaledFrame);
	} else {
		pFrame->pict_type = 0;
		video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, pFrame);
	}

	//use pts if dts is invalid
	if(pCodecCtxEnc->coded_frame->pts != AV_NOPTS_VALUE) {
		*target_pts = av_rescale_q(pCodecCtxEnc->coded_frame->pts, pCodecCtxEnc->time_base, time_base);
	} else {	//TODO: review this
		video_frame_size = -1;	/* no usable pts: report failure */
		goto cleanup;
	}

	if(video_frame_size > 0) {
		if(pCodecCtxEnc->coded_frame) {
			dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pCodecCtxEnc->coded_frame->pkt_dts, pCodecCtxEnc->coded_frame->pkt_pts, pCodecCtxEnc->coded_frame->pts);
			dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: outtype: %d%s\n", pCodecCtxEnc->coded_frame->pict_type, pCodecCtxEnc->coded_frame->key_frame ? " (key)" : "");
		}
#ifdef DISPLAY_PSNR
		static double ist_psnr = 0;
		static double cum_psnr = 0;
		static int psnr_samples = 0;
		if(pCodecCtxEnc->coded_frame) {
			if(pCodecCtxEnc->flags&CODEC_FLAG_PSNR) {
				ist_psnr = GET_PSNR(pCodecCtxEnc->coded_frame->error[0]/(pCodecCtxEnc->width*pCodecCtxEnc->height*255.0*255.0));
				psnr_samples++;
				cum_psnr += ist_psnr;
				fprintf(stderr, "PSNR: ist %.4f avg: %.4f\n", ist_psnr, cum_psnr / (double)psnr_samples);
			}
		}
#endif
	}

cleanup:
	/* av_free(NULL) is a no-op, so no guards are needed */
	av_free(pFrame2);
	av_free(scaledFrame);
	av_free(scaledFrame_buffer);
	return video_frame_size;
}
273

  
274

  
275
void createFrame(struct Frame *frame, long long newTime, int video_frame_size, int pict_type)
276
{
277

  
278
					frame->timestamp.tv_sec = (long long)newTime/1000;
279
					frame->timestamp.tv_usec = newTime%1000;
280
					frame->size = video_frame_size;
281
					/* pict_type maybe 1 (I), 2 (P), 3 (B), 5 (AUDIO)*/
282
					frame->type = pict_type;
283

  
284

  
285
/* should be on some other place
286
//					if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: original codec frame number %d vs. encoded %d vs. packed %d\n", pCodecCtx->frame_number, pCodecCtxEnc->frame_number, frame->number);
287
//					if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: duration %d timebase %d %d container timebase %d\n", (int)packet.duration, pCodecCtxEnc->time_base.den, pCodecCtxEnc->time_base.num, pCodecCtx->time_base.den);
288

  
289
#ifdef YUV_RECORD_ENABLED
290
					if(!vcopy && ChunkerStreamerTestMode)
291
					{
292
						if(videotrace)
293
							fprintf(videotrace, "%d %d %d\n", frame->number, pict_type, frame->size);
294

  
295
						SaveFrame(pFrame, dest_width, dest_height);
296

  
297
						++savedVideoFrames;
298
						SaveEncodedFrame(frame, video_outbuf);
299

  
300
						if(!firstSavedVideoFrame)
301
							firstSavedVideoFrame = frame->number;
302

  
303
						char tmp_filename[255];
304
						sprintf(tmp_filename, "yuv_data/streamer_out_context.txt");
305
						FILE* tmp = fopen(tmp_filename, "w");
306
						if(tmp)
307
						{
308
							fprintf(tmp, "width = %d\nheight = %d\ntotal_frames_saved = %d\ntotal_frames_decoded = %d\nfirst_frame_number = %ld\nlast_frame_number = %d\n"
309
								,dest_width, dest_height
310
								,savedVideoFrames, savedVideoFrames, firstSavedVideoFrame, frame->number);
311
							fclose(tmp);
312
						}
313
					}
314
#endif
315
*/
316

  
317
					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: encapsulated frame size:%d type:%d\n", frame->size, frame->type);
318
					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: timestamped sec %ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
319
}
320

  
321

  
322
/*
 * Append one encoded frame to the outstream's current chunk and, once
 * the chunk-fill strategy reports the chunk complete, push it to the
 * external transport/player.
 *
 * Exits the process if the chunk cannot be updated.
 */
void addFrameToOutstream(struct outstream *os, Frame *frame, uint8_t *video_outbuf)
{
	ExternalChunk *c = os->chunk;

	if(update_chunk(c, frame, video_outbuf) == -1) {
		fprintf(stderr, "VIDEO: unable to update chunk %d. Exiting.\n", c->seq);
		exit(-1);
	}

	// not full yet: keep accumulating frames into this chunk
	if(!chunkFilled(c, VIDEO_CHUNK))
		return;

	// average the accumulated per-frame priority over the chunk
	c->priority /= c->frames_num;

	//SAVE ON FILE
	//saveChunkOnFile(chunk);
	//Send the chunk to an external transport/player
	sendChunk(os->output, c);
	dctprintf(DEBUG_CHUNKER, "VIDEO: sent chunk video %d, prio:%f, size %d\n", c->seq, c->priority, c->len);
	c->seq = 0; //signal that we need an increase
	//initChunk(chunk, &seq_current_chunk);
}
346

  
165 347
int main(int argc, char *argv[]) {
166 348
	signal(SIGINT, sigproc);
167 349
	
......
197 379
	int16_t *samples = NULL;
198 380
	//a raw uncompressed video picture
199 381
	AVFrame *pFrame1 = NULL;
200
	AVFrame *pFrame2 = NULL;
201
	AVFrame *scaledFrame = NULL;
202 382

  
203 383
	AVFormatContext *pFormatCtx;
204 384
	AVCodecContext  *pCodecCtx = NULL ,*pCodecCtxEnc = NULL ,*aCodecCtxEnc = NULL ,*aCodecCtx = NULL;
......
217 397

  
218 398
	//Napa-Wine specific Frame and Chunk structures for transport
219 399
	Frame *frame = NULL;
220
	ExternalChunk *chunk = NULL;
221 400
	ExternalChunk *chunkaudio = NULL;
222 401
	
223 402
	char av_input[1024];
......
589 768

  
590 769
	// Allocate video in frame and out buffer
591 770
	pFrame1=avcodec_alloc_frame();
592
	pFrame2=avcodec_alloc_frame();
593
	scaledFrame=avcodec_alloc_frame();
594
	if(pFrame1==NULL || pFrame2==NULL || scaledFrame == NULL) {
771
	if(pFrame1==NULL) {
595 772
		fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
596 773
		return -1;
597 774
	}
598 775
	video_outbuf_size = STREAMER_MAX_VIDEO_BUFFER_SIZE;
599 776
	video_outbuf = av_malloc(video_outbuf_size);
600
	int scaledFrame_buf_size = avpicture_get_size( PIX_FMT_YUV420P, dest_width, dest_height);
601
	uint8_t* scaledFrame_buffer = (uint8_t *) av_malloc( scaledFrame_buf_size * sizeof( uint8_t ) );
602
	avpicture_fill( (AVPicture*) scaledFrame, scaledFrame_buffer, PIX_FMT_YUV420P, dest_width, dest_height);
603
	if(!video_outbuf || !scaledFrame_buffer) {
604
		fprintf(stderr, "INIT: Memory error alloc video_outbuf!!!\n");
605
		return -1;
606
	}
607 777

  
608 778
	//allocate Napa-Wine transport
609 779
	frame = (Frame *)malloc(sizeof(Frame));
......
611 781
		fprintf(stderr, "INIT: Memory error alloc Frame!!!\n");
612 782
		return -1;
613 783
	}
784

  
614 785
	//create an empty first video chunk
615
	chunk = (ExternalChunk *)malloc(sizeof(ExternalChunk));
616
	if(!chunk) {
786
	outstream.chunk = (ExternalChunk *)malloc(sizeof(ExternalChunk));
787
	if(!outstream.chunk) {
617 788
		fprintf(stderr, "INIT: Memory error alloc chunk!!!\n");
618 789
		return -1;
619 790
	}
620
	chunk->data = NULL;
621
	chunk->seq = 0;
622
	//initChunk(chunk, &seq_current_chunk); if i init them now i get out of sequence
623
	dcprintf(DEBUG_CHUNKER, "INIT: chunk video %d\n", chunk->seq);
791
	outstream.chunk->data = NULL;
792
	outstream.chunk->seq = 0;
793
	dcprintf(DEBUG_CHUNKER, "INIT: chunk video %d\n", outstream.chunk->seq);
624 794
	//create empty first audio chunk
795

  
625 796
	chunkaudio = (ExternalChunk *)malloc(sizeof(ExternalChunk));
626 797
	if(!chunkaudio) {
627 798
		fprintf(stderr, "INIT: Memory error alloc chunkaudio!!!\n");
......
654 825
		return -2;
655 826
	}
656 827
	
657
	output = initTCPPush(peer_ip, peer_port);
658
	if (!output) {
828
	outstream.output = initTCPPush(peer_ip, peer_port);
829
	if (!outstream.output) {
659 830
		fprintf(stderr, "Error initializing output module, exiting\n");
660 831
		exit(1);
661 832
	}
......
796 967
							target_pts = pFrame->pkt_dts;
797 968
						}
798 969
					} else {
799

  
800
					    if (pFrame->pkt_pts != AV_NOPTS_VALUE) {
801
						pFrame->pts = av_rescale_q(pFrame->pkt_pts, pFormatCtx->streams[videoStream]->time_base, pCodecCtxEnc->time_base);
802
					    } else {	//try to figure out the pts //TODO: review this
803
						if (pFrame->pkt_dts != AV_NOPTS_VALUE) {
804
							pFrame->pts = av_rescale_q(pFrame->pkt_dts, pFormatCtx->streams[videoStream]->time_base, pCodecCtxEnc->time_base);
805
						}
806
					    }
807

  
808
#ifdef VIDEO_DEINTERLACE
809
					    avpicture_deinterlace(
810
							(AVPicture*) pFrame,
811
							(const AVPicture*) pFrame,
812
							pCodecCtxEnc->pix_fmt,
813
							pCodecCtxEnc->width,
814
							pCodecCtxEnc->height);
815
				}
816
#endif
817

  
818
#ifdef USE_AVFILTER
819
					    //apply avfilters
820
					    filter(pFrame,pFrame2);
821
					    pFrame = pFrame2;
822
					    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
823
					    dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
824
#endif
825

  
826
					    if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
827
//						static AVPicture pict;
828
						static struct SwsContext *img_convert_ctx = NULL;
829
						
830
						pFrame->pict_type = 0;
831
						if(img_convert_ctx == NULL)
832
						{
833
							img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, dest_width, dest_height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
834
							if(img_convert_ctx == NULL) {
835
								fprintf(stderr, "Cannot initialize the conversion context!\n");
836
								exit(1);
837
							}
838
						}
839
						sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, pCodecCtx->height, scaledFrame->data, scaledFrame->linesize);
840
						scaledFrame->pts = pFrame->pts;
841
						scaledFrame->pict_type = 0;
842
						video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, scaledFrame);
843
					    } else {
844
						pFrame->pict_type = 0;
845
						video_frame_size = avcodec_encode_video(pCodecCtxEnc, video_outbuf, video_outbuf_size, pFrame);
846
					    }
847

  
848
					    //use pts if dts is invalid
849
					    if(pCodecCtxEnc->coded_frame->pts!=AV_NOPTS_VALUE)
850
						target_pts = av_rescale_q(pCodecCtxEnc->coded_frame->pts, pCodecCtxEnc->time_base, pFormatCtx->streams[videoStream]->time_base);
851
					    else {	//TODO: review this
852
						av_free_packet(&packet);
853
						continue;
854
						//fprintf(stderr, "VIDEOout: pts error\n");
855
						//exit(1);
856
					    }
857
					}
858

  
859
					if(video_frame_size <= 0)
860
					{
861
						contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
862
						av_free_packet(&packet);
863
						continue;
864
					}
865

  
866
					if(!vcopy && pCodecCtxEnc->coded_frame) {
867
						dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: pkt_dts %"PRId64" pkt_pts %"PRId64" frame.pts %"PRId64"\n", pCodecCtxEnc->coded_frame->pkt_dts, pCodecCtxEnc->coded_frame->pkt_pts, pCodecCtxEnc->coded_frame->pts);
868
						dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOout: outtype: %d%s\n", pCodecCtxEnc->coded_frame->pict_type, pCodecCtxEnc->coded_frame->key_frame ? " (key)" : "");
869
					}
870
#ifdef DISPLAY_PSNR
871
					static double ist_psnr = 0;
872
					static double cum_psnr = 0;
873
					static int psnr_samples = 0;
874
					if(!vcopy && pCodecCtxEnc->coded_frame) {
875
						if(pCodecCtxEnc->flags&CODEC_FLAG_PSNR) {
876
							ist_psnr = GET_PSNR(pCodecCtxEnc->coded_frame->error[0]/(pCodecCtxEnc->width*pCodecCtxEnc->height*255.0*255.0));
877
							psnr_samples++;
878
							cum_psnr += ist_psnr;
879
							fprintf(stderr, "PSNR: ist %.4f avg: %.4f\n", ist_psnr, cum_psnr / (double)psnr_samples);
970
						video_frame_size = transcodeFrame(video_outbuf, video_outbuf_size, &target_pts, pFrame, pFormatCtx->streams[videoStream]->time_base, pCodecCtx, pCodecCtxEnc);
971
						if (video_frame_size <= 0) {
972
							av_free_packet(&packet);
973
							contFrameVideo = STREAMER_MAX(contFrameVideo-1, 0);
974
							continue;
880 975
						}
881 976
					}
882
#endif
883 977

  
884 978
					if(offset_av)
885 979
					{
......
917 1011
						av_free_packet(&packet);
918 1012
						continue; //SKIP THIS FRAME, bad timestamp
919 1013
					}
920
					
921
					//~ printf("pCodecCtxEnc->error[0]=%"PRId64"\n", pFrame->error[0]);
922
	
923
					frame->timestamp.tv_sec = (long long)newTime/1000;
924
					frame->timestamp.tv_usec = newTime%1000;
925
					frame->size = video_frame_size;
926
					/* pict_type maybe 1 (I), 2 (P), 3 (B), 5 (AUDIO)*/
927
					frame->type = vcopy ? pFrame->pict_type : (unsigned char)pCodecCtxEnc->coded_frame->pict_type;
928

  
929
					if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: original codec frame number %d vs. encoded %d vs. packed %d\n", pCodecCtx->frame_number, pCodecCtxEnc->frame_number, frame->number);
930
					if (!vcopy) dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: duration %d timebase %d %d container timebase %d\n", (int)packet.duration, pCodecCtxEnc->time_base.den, pCodecCtxEnc->time_base.num, pCodecCtx->time_base.den);
931

  
932
#ifdef YUV_RECORD_ENABLED
933
					if(!vcopy && ChunkerStreamerTestMode)
934
					{
935
						if(videotrace)
936
							fprintf(videotrace, "%d %d %d\n", frame->number, pFrame->pict_type, frame->size);
937

  
938
						if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width)
939
							SaveFrame(scaledFrame, dest_width, dest_height);
940
						else
941
							SaveFrame(pFrame, dest_width, dest_height);
942

  
943
						++savedVideoFrames;
944
						SaveEncodedFrame(frame, video_outbuf);
945

  
946
						if(!firstSavedVideoFrame)
947
							firstSavedVideoFrame = frame->number;
948
						
949
						char tmp_filename[255];
950
						sprintf(tmp_filename, "yuv_data/streamer_out_context.txt");
951
						FILE* tmp = fopen(tmp_filename, "w");
952
						if(tmp)
953
						{
954
							fprintf(tmp, "width = %d\nheight = %d\ntotal_frames_saved = %d\ntotal_frames_decoded = %d\nfirst_frame_number = %ld\nlast_frame_number = %d\n"
955
								,dest_width, dest_height
956
								,savedVideoFrames, savedVideoFrames, firstSavedVideoFrame, frame->number);
957
							fclose(tmp);
958
						}
959
					}
960
#endif
961 1014

  
962
					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: encapsulated frame size:%d type:%d\n", frame->size, frame->type);
963
					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: timestamped sec %ld usec:%ld\n", (long)frame->timestamp.tv_sec, (long)frame->timestamp.tv_usec);
964
					//contFrameVideo++; //lets increase the numbering of the frames
965

  
966
					if(update_chunk(chunk, frame, video_outbuf) == -1) {
967
						fprintf(stderr, "VIDEO: unable to update chunk %d. Exiting.\n", chunk->seq);
968
						exit(-1);
969
					}
970

  
971
					if(chunkFilled(chunk, VIDEO_CHUNK)) { // is chunk filled using current strategy?
972
						//calculate priority
973
						chunk->priority /= chunk->frames_num;
974

  
975
						//SAVE ON FILE
976
						//saveChunkOnFile(chunk);
977
						//Send the chunk to an external transport/player
978
						sendChunk(output, chunk);
979
						dctprintf(DEBUG_CHUNKER, "VIDEO: sent chunk video %d, prio:%f, size %d\n", chunk->seq, chunk->priority, chunk->len);
980
						chunk->seq = 0; //signal that we need an increase
981
						//initChunk(chunk, &seq_current_chunk);
982
					}
1015
					createFrame(frame, newTime, video_frame_size, 
1016
					            vcopy ? pFrame->pict_type : (unsigned char)pCodecCtxEnc->coded_frame->pict_type);
1017
					addFrameToOutstream(&outstream, frame, video_outbuf);
983 1018

  
984 1019
					//compute how long it took to encode video frame
985 1020
					gettimeofday(&now_tv, NULL);
......
1019 1054

  
1020 1055
						}
1021 1056
					}
1022

  
1023 1057
				}
1024 1058
			}
1025
		}
1026
		else if(packet.stream_index==audioStream)
1027
		{
1059
		} else if(packet.stream_index==audioStream) {
1028 1060
			if(sleep > 0)
1029 1061
			{
1030 1062
				dcprintf(DEBUG_TIMESTAMPING, "\n\tREADLOOP: going to sleep for %ld microseconds\n", sleep);
......
1141 1173
					//SAVE ON FILE
1142 1174
					//saveChunkOnFile(chunkaudio);
1143 1175
					//Send the chunk to an external transport/player
1144
					sendChunk(output, chunkaudio);
1176
					sendChunk(outstream.output, chunkaudio);
1145 1177
					dctprintf(DEBUG_CHUNKER, "AUDIO: just sent chunk audio %d\n", chunkaudio->seq);
1146 1178
					chunkaudio->seq = 0; //signal that we need an increase
1147 1179
					//initChunk(chunkaudio, &seq_current_chunk);
......
1192 1224
		fclose(psnrtrace);
1193 1225

  
1194 1226
close:
1195
	if(chunk->seq != 0 && chunk->frames_num>0) {
1227
	if(outstream.chunk->seq != 0 && outstream.chunk->frames_num>0) {
1196 1228
		//SAVE ON FILE
1197 1229
		//saveChunkOnFile(chunk);
1198 1230
		//Send the chunk to an external transport/player
1199
		sendChunk(output, chunk);
1231
		sendChunk(outstream.output, outstream.chunk);
1200 1232
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST VIDEO CHUNK\n");
1201
		chunk->seq = 0; //signal that we need an increase just in case we will restart
1233
		outstream.chunk->seq = 0; //signal that we need an increase just in case we will restart
1202 1234
	}
1203 1235
	if(chunkaudio->seq != 0 && chunkaudio->frames_num>0) {
1204 1236
		//SAVE ON FILE     
1205 1237
		//saveChunkOnFile(chunkaudio);
1206 1238
		//Send the chunk via http to an external transport/player
1207
		sendChunk(output, chunkaudio);
1239
		sendChunk(outstream.output, chunkaudio);
1208 1240
		dcprintf(DEBUG_CHUNKER, "CHUNKER: SENDING LAST AUDIO CHUNK\n");
1209 1241
		chunkaudio->seq = 0; //signal that we need an increase just in case we will restart
1210 1242
	}
......
1214 1246
	finalizeChunkPusher();
1215 1247
#endif
1216 1248

  
1217
	free(chunk);
1249
	free(outstream.chunk);
1218 1250
	free(chunkaudio);
1219 1251
	free(frame);
1220 1252
	av_free(video_outbuf);
1221
	av_free(scaledFrame_buffer);
1222 1253
	av_free(audio_outbuf);
1223 1254
	free(cmeta);
1224 1255

  
1225 1256
	// Free the YUV frame
1226 1257
	av_free(pFrame1);
1227
	av_free(pFrame2);
1228
	av_free(scaledFrame);
1229 1258
	av_free(samples);
1230 1259
  
1231 1260
	// Close the codec
......
1283 1312
	}
1284 1313

  
1285 1314
#ifdef TCPIO
1286
	finalizeTCPChunkPusher(output);
1315
	finalizeTCPChunkPusher(outstream.output);
1287 1316
#endif
1288 1317

  
1289 1318
#ifdef USE_AVFILTER

Also available in: Unified diff