Revision 51d67a10 chunker_streamer/chunker_streamer.c

View differences:

--- a/chunker_streamer/chunker_streamer.c
+++ b/chunker_streamer/chunker_streamer.c
@@ -11,6 +11,10 @@
 #include <getopt.h>
 #include <libswscale/swscale.h>
 
+#ifdef USE_AVFILTER
+#include "chunker_filtering.h"
+#endif
+
 #define DEBUG
 #define DEBUG_AUDIO_FRAMES  false
 #define DEBUG_VIDEO_FRAMES  false
@@ -55,6 +59,8 @@
 
 long delay_audio = 0; //delay audio by x millisec
 
+char *avfilter="yadif";
+
 // Constant number of frames per chunk
 int chunkFilledFramesStrategy(ExternalChunk *echunk, int chunkType)
 {
@@ -126,6 +132,9 @@
 "\t[-p]: pts anomaly threshold (default: -1=off).\n"
 "\t[-q]: sync anomaly threshold ((default: -1=off).\n"
 "\t[-t]: QoE test mode\n\n"
+
+"\t[--avfilter]:set input filter (default: yadif\n"
+"\n"
 "Codec options:\n"
 "\t[-g GOP]: gop size\n"
 "\t[-b frames]: max number of consecutive b frames\n"
@@ -181,7 +190,8 @@
 	//a raw buffer for decoded uncompressed audio samples
 	int16_t *samples = NULL;
 	//a raw uncompressed video picture
-	AVFrame *pFrame = NULL;
+	AVFrame *pFrame1 = NULL;
+	AVFrame *pFrame2 = NULL;
 	AVFrame *scaledFrame = NULL;
 
 	AVFormatContext *pFormatCtx;
@@ -213,6 +223,7 @@
 		/* These options set a flag. */
 		{"audio_stream", required_argument, 0, 0},
 		{"video_stream", required_argument, 0, 0},
+		{"avfilter", required_argument, 0, 0},
 		{0, 0, 0, 0}
 	};
 	/* `getopt_long' stores the option index here. */
@@ -224,6 +235,7 @@
 			case 0: //for long options
 				if( strcmp( "audio_stream", long_options[option_index].name ) == 0 ) { audioStream = atoi(optarg); }
 				if( strcmp( "video_stream", long_options[option_index].name ) == 0 ) { videoStream = atoi(optarg); }
+				if( strcmp( "avfilter", long_options[option_index].name ) == 0 ) { avfilter = strdup(optarg); }
 				break;
 			case 'i':
 				sprintf(av_input, "%s", optarg);
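
The two hunks above wire the new filter name into the existing getopt_long() machinery: long-option entries registered with flag==0 make getopt_long() return 0, and option_index then tells the handler which entry matched. As a stand-alone illustration (not the project's code), the same dispatch pattern looks like this:

	/* minimal sketch of long-only option handling with getopt_long();
	 * the --avfilter name and the "yadif" default mirror the patch */
	#include <getopt.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(int argc, char *argv[])
	{
		char *avfilter = "yadif";
		static struct option long_options[] = {
			{"avfilter", required_argument, 0, 0},
			{0, 0, 0, 0}
		};
		int c, option_index = 0;

		while ((c = getopt_long(argc, argv, "", long_options, &option_index)) != -1) {
			switch (c) {
				case 0: /* long options without a short letter land here */
					if (strcmp("avfilter", long_options[option_index].name) == 0)
						avfilter = strdup(optarg);
					break;
			}
		}
		printf("filter chain: %s\n", avfilter);
		return 0;
	}

Running it as ./a.out --avfilter yadif=1 prints "filter chain: yadif=1"; without the option the "yadif" default is kept, matching the global avfilter variable introduced earlier in the patch.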
@@ -571,9 +583,10 @@
 	}
 
 	// Allocate video in frame and out buffer
-	pFrame=avcodec_alloc_frame();
+	pFrame1=avcodec_alloc_frame();
+	pFrame2=avcodec_alloc_frame();
 	scaledFrame=avcodec_alloc_frame();
-	if(pFrame==NULL || scaledFrame == NULL) {
+	if(pFrame1==NULL || pFrame2==NULL || scaledFrame == NULL) {
 		fprintf(stderr, "INIT: Memory error alloc video frame!!!\n");
 		return -1;
 	}
@@ -657,6 +670,12 @@
 	FILE* videotrace = fopen(videotrace_filename, "w");
 	FILE* psnrtrace = fopen(psnr_filename, "w");
 
+#ifdef USE_AVFILTER
+	//init AVFilter
+	avfilter_register_all();
+	init_filters(avfilter, pCodecCtx);
+#endif
+
 	//main loop to read from the input file
 	while((av_read_frame(pFormatCtx, &packet)>=0) && !quit)
 	{
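
chunker_filtering.h/.c are not part of this changeset, so the actual implementation of init_filters() is not visible here. As a rough, hypothetical sketch against a reasonably recent libavfilter (the avcodec_alloc_frame() calls above suggest an older FFmpeg where the same idea is spelled slightly differently), init_filters(filters_descr, dec_ctx) would build a graph of the form buffer -> filters_descr -> buffersink; the names graph, src_ctx and sink_ctx below are illustrative, not taken from the project's sources:

	/* hypothetical init_filters() sketch -- not the project's chunker_filtering.c */
	#include <stdio.h>
	#include <libavcodec/avcodec.h>
	#include <libavfilter/avfilter.h>
	#include <libavfilter/buffersrc.h>
	#include <libavfilter/buffersink.h>
	#include <libavutil/mem.h>

	static AVFilterGraph   *graph    = NULL;
	static AVFilterContext *src_ctx  = NULL;   /* "buffer": decoded frames enter here */
	static AVFilterContext *sink_ctx = NULL;   /* "buffersink": filtered frames leave here */

	int init_filters(const char *filters_descr, AVCodecContext *dec_ctx)
	{
		char args[512];
		AVFilterInOut *outputs = avfilter_inout_alloc();
		AVFilterInOut *inputs  = avfilter_inout_alloc();
		int ret = -1;

		graph = avfilter_graph_alloc();
		if (!outputs || !inputs || !graph)
			goto end;

		/* the buffer source must know the decoder's geometry, pixel format and time base */
		snprintf(args, sizeof(args),
		         "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
		         dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
		         dec_ctx->time_base.num, dec_ctx->time_base.den,
		         dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

		ret = avfilter_graph_create_filter(&src_ctx, avfilter_get_by_name("buffer"),
		                                   "in", args, NULL, graph);
		if (ret < 0)
			goto end;
		ret = avfilter_graph_create_filter(&sink_ctx, avfilter_get_by_name("buffersink"),
		                                   "out", NULL, NULL, graph);
		if (ret < 0)
			goto end;

		/* hang the parsed description ("yadif", "yadif,scale=...", ...) between the endpoints */
		outputs->name       = av_strdup("in");
		outputs->filter_ctx = src_ctx;
		outputs->pad_idx    = 0;
		outputs->next       = NULL;
		inputs->name        = av_strdup("out");
		inputs->filter_ctx  = sink_ctx;
		inputs->pad_idx     = 0;
		inputs->next        = NULL;

		ret = avfilter_graph_parse_ptr(graph, filters_descr, &inputs, &outputs, NULL);
		if (ret >= 0)
			ret = avfilter_graph_config(graph, NULL);
	end:
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);
		return ret;
	}

Because avfilter is just a filtergraph string, the --avfilter switch can carry any single-input, single-output video chain, not only the yadif deinterlacer used as the default.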
@@ -709,8 +728,11 @@
 			gettimeofday(&tmp_tv, NULL);
 			
 			//decode the video packet into a raw pFrame
-			if(avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet)>0)
+			if(avcodec_decode_video2(pCodecCtx, pFrame1, &frameFinished, &packet)>0)
 			{
+				AVFrame *pFrame;
+				pFrame = pFrame1;
+
 				// usleep(5000);
 				dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOin pkt: dts %lld pts %lld pts-dts %lld\n", packet.dts, packet.pts, packet.pts-packet.dts );
 				dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %lld pkt_pts %lld frame.pts %lld\n", pFrame->pkt_dts, pFrame->pkt_pts, pFrame->pts);
@@ -731,6 +753,7 @@
 				}
 #endif
 
+
 					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEO: finished frame %d dts %lld pts %lld\n", frame->number, packet.dts, packet.pts);
 					if(frame->number==0) {
 						if(packet.dts==AV_NOPTS_VALUE)
@@ -783,6 +806,14 @@
 						}
 					    }
 
+#ifdef USE_AVFILTER
+					//apply avfilters
+					filter(pFrame,pFrame2);
+					pFrame = pFrame2;
+					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode: pkt_dts %lld pkt_pts %lld frame.pts %lld\n", pFrame2->pkt_dts, pFrame2->pkt_pts, pFrame2->pts);
+					dcprintf(DEBUG_VIDEO_FRAMES, "VIDEOdecode intype %d%s\n", pFrame2->pict_type, pFrame2->key_frame ? " (key)" : "");
+#endif
+
 					    if(pCodecCtx->height != pCodecCtxEnc->height || pCodecCtx->width != pCodecCtxEnc->width) {
 //						static AVPicture pict;
 						static struct SwsContext *img_convert_ctx = NULL;
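
Here the decoded picture in pFrame1 is run through the graph into pFrame2, and the local pFrame alias is repointed at the filtered frame so the rest of the loop (scaling, encoding, chunking) is untouched. The body of filter() is likewise outside this diff; matching the init_filters() sketch above, and assuming the AVFrame-based buffersrc/buffersink API of newer FFmpeg releases (older releases went through AVFilterBufferRef instead), it could look roughly like this:

	/* hypothetical filter(in, out) sketch -- not the project's chunker_filtering.c */
	#include <libavfilter/buffersrc.h>
	#include <libavfilter/buffersink.h>

	int filter(AVFrame *in, AVFrame *out)
	{
		int ret;

		/* feed the decoded picture to the "buffer" source; write_frame leaves
		 * the caller's frame untouched, so pFrame1 can be reused for decoding */
		ret = av_buffersrc_write_frame(src_ctx, in);
		if (ret < 0)
			return ret;

		/* pull one filtered picture from the "buffersink" into out (pFrame2);
		 * this may return AVERROR(EAGAIN) while the filter is still buffering,
		 * e.g. yadif holds back a frame before producing its first output */
		return av_buffersink_get_frame(sink_ctx, out);
	}

The close_filters() call added before return 0 would then just tear the graph down, typically a single avfilter_graph_free(&graph), while avfilter_register_all() before init_filters() makes the named filters (yadif among them) available to the graph parser.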
@@ -1183,7 +1214,8 @@
 	free(cmeta);
 
 	// Free the YUV frame
-	av_free(pFrame);
+	av_free(pFrame1);
+	av_free(pFrame2);
 	av_free(scaledFrame);
 	av_free(samples);
 
@@ -1245,6 +1277,10 @@
 	finalizeTCPChunkPusher();
 #endif
 
+#ifdef USE_AVFILTER
+	close_filters();
+#endif
+
 	return 0;
 }
 
