chunker-player / chunker_player / player_core.c @ 0b2dc7c4
History | View | Annotate | Download (44.2 KB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
|
3 |
* developed for the Napa-Wine EU project. See www.napa-wine.eu
|
4 |
*
|
5 |
* This is free software; see lgpl-2.1.txt
|
6 |
*/
|
7 |
|
8 |
#include "player_defines.h" |
9 |
#include "chunker_player.h" |
10 |
#include "player_gui.h" |
11 |
#include "player_core.h" |
12 |
#include "player_stats.h" |
13 |
#include <assert.h> |
14 |
#include <time.h> |
15 |
|
16 |
void SaveFrame(AVFrame *pFrame, int width, int height); |
17 |
int VideoCallback(void *valthread); |
18 |
int CollectStatisticsThread(void *params); |
19 |
void AudioCallback(void *userdata, Uint8 *stream, int len); |
20 |
void PacketQueueClearStats(PacketQueue *q);
|
21 |
void ChunkerPlayerCore_Pause();
|
22 |
|
23 |
//int lastCheckedVideoFrame = -1;
|
24 |
long int last_video_frame_extracted = -1; |
25 |
|
26 |
/*
 * Computes *result = *x - *y for struct timeval values.
 * NOTE: *y is modified in place during normalization (same contract as
 * the classic GNU libc example this code follows).
 * Returns 1 when the difference is negative (x is earlier than y), 0 otherwise.
 */
int timeval_subtract(struct timeval* x, struct timeval* y, struct timeval* result)
{
	/* Borrow seconds into y so that the usec subtraction cannot go negative. */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}

	/* Normalize the other way when the usec gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalization the usec difference is certainly non-negative. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	/* Sign of the overall difference is reported via the return value. */
	return x->tv_sec < y->tv_sec;
}
49 |
|
50 |
|
51 |
/*
 * (Re)initializes a packet queue for the given stream Type (AUDIO/VIDEO):
 * zeroes the whole structure, allocates its mutexes, puts the player back
 * into pre-buffering mode and clears the statistics history.
 * NOTE(review): the BEGIN debug printf reads q before the memset, so on a
 * first call it reports whatever garbage was in the struct — confirm q is
 * always zeroed by the caller if DEBUG_QUEUE is enabled.
 */
void PacketQueueInit(PacketQueue *q, short int Type)
{
#ifdef DEBUG_QUEUE
	printf("QUEUE: INIT BEGIN: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
	/* Start from an all-zero structure, then set the non-zero fields. */
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->queueType = Type;
	q->last_frame_extracted = -1;
	q->first_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	q->density = 0.0;

	/* Global player state: restart buffering from scratch. */
	QueueFillingMode = 1;
	FirstTime = 1;
	FirstTimeAudio = 1;

	/* Statistics history gets its own lock and a clean slate. */
	q->PacketHistory.Mutex = SDL_CreateMutex();
	PacketQueueClearStats(q);
#ifdef DEBUG_QUEUE
	printf("QUEUE: INIT END: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
}
77 |
|
78 |
/*
 * Empties a packet queue in place: frees every queued AVPacketList node
 * (counting each as a lost packet), then restores the queue and the global
 * buffering flags to their just-initialized state. Runs entirely under
 * q->mutex.
 */
void PacketQueueReset(PacketQueue *q)
{
	AVPacketList *tmp,*tmp1;
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET BEGIN: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_LockMutex(q->mutex);

	/* Walk the singly linked list, freeing packet payloads and nodes. */
	tmp = q->first_pkt;
	while(tmp) {
		tmp1 = tmp;
		tmp = tmp->next;
		av_free_packet(&(tmp1->pkt));
		av_free(tmp1);
#ifdef DEBUG_QUEUE
		printf("F ");
#endif
		/* Every discarded packet counts as lost for the statistics. */
		q->PacketHistory.LostCount++;
	}
#ifdef DEBUG_QUEUE
	printf("\n");
#endif

	/* Back to buffering mode until the queue refills. */
	QueueFillingMode=1;
	q->last_frame_extracted = -1;

	// on queue reset do not reset loss count
	// (loss count reset is done on queue init, ie channel switch)
	/* NOTE(review): the comment above contradicts the PacketQueueClearStats
	 * call below, which zeroes LostCount (including the increments made in
	 * the loop above) — confirm which behavior is intended. */
	q->density=0.0;
	q->first_pkt= NULL;
	//q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	FirstTime = 1;
	FirstTimeAudio = 1;
	//clean up statistics
	PacketQueueClearStats(q);
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET END: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_UnlockMutex(q->mutex);
}
120 |
|
121 |
/*
 * Resets all per-queue statistics: clears the textual stats message,
 * zeroes the whole packet-history ring, marks every slot as unused
 * (Status and LastIFrameDistance set to -1), and resets all history
 * indices and loss/played/skip counters.
 * No locking is performed here; the caller owns q (and its
 * PacketHistory.Mutex) at this point.
 */
void PacketQueueClearStats(PacketQueue *q)
{
	int i;

	sprintf(q->stats_message, "%s", "\n");
	memset((void*)q->PacketHistory.History, 0, sizeof(SHistoryElement)*QUEUE_HISTORY_SIZE);
	for(i=0; i<QUEUE_HISTORY_SIZE; i++)
	{
		q->PacketHistory.History[i].Statistics.LastIFrameDistance = -1;
		q->PacketHistory.History[i].Status = -1;
	}
	/* The original chained assignments set Index twice; all three ring
	 * indices are simply reset to zero. */
	q->PacketHistory.Index = 0;
	q->PacketHistory.LogIndex = 0;
	q->PacketHistory.QoEIndex = 0;
	q->PacketHistory.LostCount = q->PacketHistory.PlayedCount = q->PacketHistory.SkipCount = 0;
}
135 |
|
136 |
/*
 * Inserts a copy of *pkt into queue q, keeping the queue ordered by
 * stream_index (insertion sort on a singly linked list). Duplicate
 * frame indices and frames older than the last one already extracted
 * are discarded. If the incoming index is far below the last extracted
 * video frame, a source loop is assumed and the channel is re-tuned
 * (rate-limited to once every 10 seconds).
 * Returns 0 on success/insert, 1 when the packet was dropped as stale,
 * -1 on allocation/duplication failure.
 */
int ChunkerPlayerCore_PacketQueuePut(PacketQueue *q, AVPacket *pkt)
{
	//~ printf("\tSTREAM_INDEX=%d\n", pkt->stream_index);
	short int skip = 0;
	AVPacketList *pkt1, *tmp, *prevtmp;
	int res = 0;

	/* Overgrown queue means the consumer stalled: drop everything. */
	if(q->nb_packets > queue_filling_threshold*QUEUE_MAX_GROW_FACTOR) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT i have TOO MANY packets %d Type=%s, RESETTING\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		PacketQueueReset(q);
	}

	//make a copy of the incoming packet
	if(av_dup_packet(pkt) < 0) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT in Queue cannot duplicate in packet : NPackets=%d Type=%s\n",q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		return -1;
	}
	pkt1 = av_malloc(sizeof(AVPacketList));

	if(!pkt1) {
		av_free_packet(pkt);
		return -1;
	}
	pkt1->pkt = *pkt;
	pkt1->next = NULL;

	/* Persistent rate limiter for the automatic re-tune below. */
	static time_t last_auto_switch = 0;

	// file streaming loop detected => re-tune channel and start grabbing statistics
	if(
		(pkt->stream_index < last_video_frame_extracted)
		&& (pkt->stream_index <= RESTART_FRAME_NUMBER_THRESHOLD)
		&& ((time(NULL) - last_auto_switch) > 10)
	)
	{
		/* NOTE(review): pkt1 is neither inserted nor freed on this path —
		 * looks like a memory leak once per re-tune; confirm. */
		last_auto_switch = time(NULL);
		SDL_LockMutex(q->mutex);
		ReTune(&(Channels[SelectedChannel]));
		SDL_UnlockMutex(q->mutex);
	}

	else
	{
		SDL_LockMutex(q->mutex);

		// INSERTION SORT ALGORITHM
		// before inserting pkt, check if pkt.stream_index is <= current_extracted_frame.
		if(pkt->stream_index > q->last_frame_extracted)
		{
			// either checking starting from the first_pkt or needed other struct like AVPacketList with next and prev....
			//if (!q->last_pkt)
			if(!q->first_pkt) {
				/* Empty queue: the new node is both head and tail. */
				q->first_pkt = pkt1;
				q->last_pkt = pkt1;
			}
			else if(pkt->stream_index < q->first_pkt->pkt.stream_index) {
				//the packet that has arrived is earlier than the first we got some time ago!
				//we need to put it at the head of the queue
				pkt1->next = q->first_pkt;
				q->first_pkt = pkt1;
			}
			else {
				/* Scan for the first node with index >= the new packet's.
				 * This branch only runs when pkt's index is >= the head's,
				 * so prevtmp is always set before it is used below. */
				tmp = q->first_pkt;
				while(tmp->pkt.stream_index < pkt->stream_index) {
					prevtmp = tmp;
					tmp = tmp->next;

					if(!tmp) {
						break;
					}
				}
				if(tmp && tmp->pkt.stream_index == pkt->stream_index) {
					//we already have a frame with that index
					skip = 1;
#ifdef DEBUG_QUEUE
					printf("%s QUEUE: PUT: we already have frame with index %d, skipping\n", ((q->queueType == AUDIO) ? "AUDIO" : "VIDEO"), pkt->stream_index);
#endif
				}
				else {
					/* Splice the new node between prevtmp and tmp. */
					prevtmp->next = pkt1;
					pkt1->next = tmp;
					if(pkt1->next == NULL)
						q->last_pkt = pkt1;
				}
				//q->last_pkt->next = pkt1; // It was uncommented when not insertion sort
			}
			if(skip == 0) {
				//q->last_pkt = pkt1;
				q->nb_packets++;
				q->size += pkt1->pkt.size;
				/* Enough packets buffered: leave pre-buffering mode. */
				if(q->nb_packets>=queue_filling_threshold && QueueFillingMode) // && q->queueType==AUDIO)
				{
					QueueFillingMode=0;
#ifdef DEBUG_QUEUE
					printf("QUEUE: PUT: FillingMode set to zero\n");
#endif
				}
			}
			/* NOTE(review): when skip==1 the duplicate pkt1 node is not
			 * freed here — verify against the skip handling upstream. */
		}
		else {
			/* Packet is older than what was already played: discard it. */
			av_free_packet(&pkt1->pkt);
			av_free(pkt1);
#ifdef DEBUG_QUEUE
			printf("QUEUE: PUT: NOT inserting because index %d <= last extracted %d\n", pkt->stream_index, q->last_frame_extracted);
#endif
			res = 1;
		}
		SDL_UnlockMutex(q->mutex);
	}

	return res;
}
252 |
|
253 |
/*
 * One-shot initialization of the playback core: resets global player
 * state, opens the audio decoder (MP2 or MP3 depending on build flags),
 * opens the SDL audio device with an AudioCallback-driven spec, allocates
 * the reusable audio/video packet buffers, initializes both packet
 * queues, and primes the GUI stats line.
 * Returns 0 on success.
 * NOTE(review): failure paths are inconsistent — codec/SDL errors return
 * -1 while buffer-allocation failures return 1; confirm callers treat
 * any non-zero value as failure.
 */
int ChunkerPlayerCore_InitCodecs(int width, int height, int sample_rate, short int audio_channels)
{
	// some initializations
	QueueStopped = 0;
	AudioQueueOffset=0;
	AVPlaying = 0;
	GotSigInt = 0;
	FirstTimeAudio=1;
	FirstTime = 1;
	deltaAudioQError=0;
	InitRect = NULL;
	img_convert_ctx = NULL;

	SDL_AudioSpec wanted_spec;
	AVCodec *aCodec;

	/* Video decoding runs in its own thread; pass it the frame geometry. */
	memset(&VideoCallbackThreadParams, 0, sizeof(ThreadVal));

	VideoCallbackThreadParams.width = width;
	VideoCallbackThreadParams.height = height;

	// Register all formats and codecs
	avcodec_init();
	av_register_all();

	aCodecCtx = avcodec_alloc_context();
	//aCodecCtx->bit_rate = 64000;
	aCodecCtx->sample_rate = sample_rate;
	aCodecCtx->channels = audio_channels;
#ifdef MP3_AUDIO_ENCODER
	aCodec = avcodec_find_decoder(CODEC_ID_MP3); // codec audio
#else
	aCodec = avcodec_find_decoder(CODEC_ID_MP2);
#endif
	printf("MP2 codec id %d MP3 codec id %d\n",CODEC_ID_MP2,CODEC_ID_MP3);
	if(!aCodec) {
		printf("Codec not found!\n");
		return -1;
	}
	if(avcodec_open(aCodecCtx, aCodec)<0) {
		fprintf(stderr, "could not open codec\n");
		return -1; // Could not open codec
	}
	printf("using audio Codecid: %d ",aCodecCtx->codec_id);
	printf("samplerate: %d ",aCodecCtx->sample_rate);
	printf("channels: %d\n",aCodecCtx->channels);
	/* Ask SDL for a device matching the decoder's output format; the
	 * actual granted spec comes back in AudioSpecification. */
	CurrentAudioFreq = wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	CurrentAudioSamples = wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = AudioCallback;
	wanted_spec.userdata = aCodecCtx;
	if(SDL_OpenAudio(&wanted_spec,&AudioSpecification)<0)
	{
		fprintf(stderr,"SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	/* dimAudioQ: bytes per SDL callback; deltaAudioQ: callback period in ms. */
	dimAudioQ = AudioSpecification.size;
	deltaAudioQ = (float)((float)AudioSpecification.samples)*1000/AudioSpecification.freq;

#ifdef DEBUG_AUDIO
	printf("freq:%d\n",AudioSpecification.freq);
	printf("format:%d\n",AudioSpecification.format);
	printf("channels:%d\n",AudioSpecification.channels);
	printf("silence:%d\n",AudioSpecification.silence);
	printf("samples:%d\n",AudioSpecification.samples);
	printf("size:%d\n",AudioSpecification.size);
	printf("deltaAudioQ: %f\n",deltaAudioQ);
#endif

	outbuf_audio = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);

	//initialize the audio and the video queues
	PacketQueueInit(&audioq, AUDIO);
	PacketQueueInit(&videoq, VIDEO);

	// Init audio and video buffers
	av_init_packet(&AudioPkt);
	av_init_packet(&VideoPkt);
	//printf("AVCODEC_MAX_AUDIO_FRAME_SIZE=%d\n", AVCODEC_MAX_AUDIO_FRAME_SIZE);
	AudioPkt.data=(uint8_t *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if(!AudioPkt.data) return 1;
	/* width*height*3/2 = one YUV420P frame worth of bytes. */
	VideoPkt.data=(uint8_t *)malloc(width*height*3/2);
	if(!VideoPkt.data) return 1;

	InitRect = (SDL_Rect*) malloc(sizeof(SDL_Rect));
	if(!InitRect)
	{
		printf("Memory error!!!\n");
		return -1;
	}
	InitRect->x = OverlayRect.x;
	InitRect->y = OverlayRect.y;
	InitRect->w = OverlayRect.w;
	InitRect->h = OverlayRect.h;

	char audio_stats[255], video_stats[255];
	sprintf(audio_stats, "waiting for incoming audio packets...");
	sprintf(video_stats, "waiting for incoming video packets...");
	ChunkerPlayerGUI_SetStatsText(audio_stats, video_stats,LED_GREEN);

	return 0;
}
357 |
|
358 |
/*
 * Decodes an enqueued (still compressed) audio packet IN PLACE: the
 * packet's payload is replaced with the decoded PCM samples, its old
 * encoded buffer is freed, and the queue's byte count is adjusted
 * accordingly. *size receives the original compressed size.
 * convergence_duration is (ab)used as a "decoded" marker: it is set to
 * -1 here whether or not decoding succeeds.
 * Returns 1 on success, 0 on any failure (allocation or decode).
 */
int DecodeEnqueuedAudio(AVPacket *pkt, PacketQueue *q, int* size)
{
	uint16_t *audio_bufQ = NULL;
	int16_t *dataQ = NULL;
	int data_sizeQ = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int lenQ;
	int ret = 0;

	//set the flag to decoded anyway
	pkt->convergence_duration = -1;

	/* Temporary decode buffer; the decoded bytes are copied out below. */
	audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if(audio_bufQ) {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: about to decode packet %d, size %d, data %d\n", pkt->stream_index, pkt->size, pkt->data);
#endif
		//decode the packet data
		lenQ = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_bufQ, &data_sizeQ, pkt);
		if(lenQ > 0) {
			dataQ = (int16_t *)av_malloc(data_sizeQ); //this will be free later at the time of playback
			if(dataQ) {
				memcpy(dataQ, audio_bufQ, data_sizeQ);
				if(pkt->data != NULL)
				{
					//discard the old encoded bytes
					av_free(pkt->data);
				}
				//subtract them from queue size
				q->size -= pkt->size;
				*size = pkt->size;
				/* Hand ownership of the decoded buffer to the packet. */
				pkt->data = (int8_t *)dataQ;
				pkt->size = data_sizeQ;
				//add new size to queue size
				q->size += pkt->size;
				ret = 1;
			}
			else {
#ifdef DEBUG_AUDIO_BUFFER
				printf("AUDIO_BUFFER: cannot alloc space for decoded packet %d\n", pkt->stream_index);
#endif
			}
		}
		else {
#ifdef DEBUG_AUDIO_BUFFER
			printf("AUDIO_BUFFER: cannot decode packet %d\n", pkt->stream_index);
#endif
		}
		av_free(audio_bufQ);
	}
	else {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: cannot alloc decode buffer for packet %d\n", pkt->stream_index);
#endif
	}
	return ret; //problems occurred
}
414 |
|
415 |
/**
|
416 |
* removes a packet from the list and returns the next
|
417 |
* */
|
418 |
/**
 * Unlinks packet node p from queue q, frees both the packet payload and
 * the node itself, and returns the node that followed p (may be NULL).
 * The caller must hold q->mutex and must re-link q->first_pkt itself;
 * p must be non-NULL.
 */
AVPacketList *RemoveFromQueue(PacketQueue *q, AVPacketList *p)
{
	AVPacketList *retpk = p->next;
	q->nb_packets--;
	//adjust size here and not in the various cases of the dequeue
	q->size -= p->pkt.size;
	/* The original guarded these frees with `if(&p->pkt)` and `if(p)`;
	 * the address of a member is never NULL and p was already
	 * dereferenced above, so both conditions were always true. */
	av_free_packet(&p->pkt);
	av_free(p);
	return retpk;
}
432 |
|
433 |
/*
 * Starting at node p, returns the first packet of queue q that is (or can
 * be) decoded. Packets that fail to decode are removed from the queue and
 * the scan continues with their successor. Returns NULL when the list is
 * exhausted. *size is filled by DecodeEnqueuedAudio when a decode happens.
 */
AVPacketList *SeekAndDecodePacketStartingFrom(AVPacketList *p, PacketQueue *q, int* size)
{
	for(; p != NULL; ) {
		/* convergence_duration == 0 marks a packet not decoded yet. */
		if(p->pkt.convergence_duration != 0)
			return p; /* already decoded earlier */

		if(DecodeEnqueuedAudio(&(p->pkt), q, size))
			return p; /* decoded successfully just now */

		/* Undecodable packet: drop it and try the next one. */
		p = RemoveFromQueue(q, p);
	}
	return NULL;
}
451 |
|
452 |
/*
 * Extracts data from queue q into *pkt under q->mutex.
 * av==1 (audio): copies exactly dimAudioQ bytes of decoded PCM into
 *   pkt->data, taking them from one packet or spanning two consecutive
 *   packets; packet timestamps are advanced by the consumed duration,
 *   with the rounding remainder accumulated in deltaAudioQError.
 * av!=1 (video): copies the whole head packet and removes it.
 * Returns 1 on success, -1 when buffering/stopped or nothing usable.
 * Preconditions (from the memcpy calls): pkt->data points to a caller-
 * owned buffer large enough for dimAudioQ bytes (audio) or the head
 * packet's size (video).
 */
int PacketQueueGet(PacketQueue *q, AVPacket *pkt, short int av, int* size)
{
	//AVPacket tmp;
	AVPacketList *pkt1 = NULL;
	int ret=-1;
	int SizeToCopy=0;
	struct timeval now_tv;

	SDL_LockMutex(q->mutex);

#ifdef DEBUG_QUEUE
	printf("QUEUE: Get NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif

	/* While (re)buffering audio, or when the player is paused/stopped,
	 * hand nothing out. */
	if((q->queueType==AUDIO && QueueFillingMode) || QueueStopped)
	{
		SDL_UnlockMutex(q->mutex);
		return -1;
	}

	if(av==1) { //somebody requested an audio packet, q is the audio queue
		//try to dequeue the first packet of the audio queue
		pkt1 = SeekAndDecodePacketStartingFrom(q->first_pkt, q, size);
		if(pkt1) { //yes we have them!
			if(pkt1->pkt.size-AudioQueueOffset > dimAudioQ) {
				//one packet is enough to give us the requested number of bytes by the audio_callback
#ifdef DEBUG_QUEUE_DEEP
				printf(" AV=1 and Extract from the same packet\n");
#endif
				pkt->size = dimAudioQ;
				memcpy(pkt->data,pkt1->pkt.data+AudioQueueOffset,dimAudioQ);
				pkt->dts = pkt1->pkt.dts;
				pkt->pts = pkt1->pkt.pts;
				pkt->stream_index = pkt1->pkt.stream_index;//1;
				pkt->flags = 1;
				pkt->pos = -1;
				pkt->convergence_duration = -1;
#ifdef DEBUG_QUEUE_DEEP
				printf(" Adjust timestamps Old = %lld New = %lld\n", pkt1->pkt.dts, (int64_t)(pkt1->pkt.dts + deltaAudioQ + deltaAudioQError));
#endif
				/* Advance the remaining packet's timestamps by one callback
				 * period; keep the fractional ms remainder in
				 * deltaAudioQError so the error does not accumulate. */
				int64_t Olddts=pkt1->pkt.dts;
				pkt1->pkt.dts += deltaAudioQ + deltaAudioQError;
				pkt1->pkt.pts += deltaAudioQ + deltaAudioQError;
				deltaAudioQError=(float)Olddts + deltaAudioQ + deltaAudioQError - (float)pkt1->pkt.dts;
				AudioQueueOffset += dimAudioQ;
#ifdef DEBUG_QUEUE_DEEP
				printf(" deltaAudioQError = %f\n",deltaAudioQError);
#endif

				ChunkerPlayerStats_UpdateAudioLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);

				//update index of last frame extracted
				q->last_frame_extracted = pkt->stream_index;
#ifdef DEBUG_AUDIO_BUFFER
				printf("1: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif
				ret = 1; //OK
			}
			else {
				//we need bytes from two consecutive packets to satisfy the audio_callback
#ifdef DEBUG_QUEUE_DEEP
				printf(" AV = 1 and Extract from 2 packets\n");
#endif
				//check for a valid next packet since we will finish the current packet
				//and also take some bytes from the next one
				pkt1->next = SeekAndDecodePacketStartingFrom(pkt1->next, q, size);
				if(pkt1->next) {
#ifdef DEBUG_QUEUE_DEEP
					printf(" we have a next...\n");
#endif
					pkt->size = dimAudioQ;
					pkt->dts = pkt1->pkt.dts;
					pkt->pts = pkt1->pkt.pts;
					pkt->stream_index = pkt1->pkt.stream_index;//1;
					pkt->flags = 1;
					pkt->pos = -1;
					pkt->convergence_duration = -1;
					{
						/* Tail of the current packet + head of the next. */
						SizeToCopy=pkt1->pkt.size-AudioQueueOffset;
#ifdef DEBUG_QUEUE_DEEP
						printf(" SizeToCopy=%d\n",SizeToCopy);
#endif
						memcpy(pkt->data, pkt1->pkt.data+AudioQueueOffset, SizeToCopy);
						memcpy(pkt->data+SizeToCopy, pkt1->next->pkt.data, (dimAudioQ-SizeToCopy)*sizeof(uint8_t));
					}
#ifdef DEBUG_AUDIO_BUFFER
					printf("2: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif
				}
#ifdef DEBUG_AUDIO_BUFFER
				else {
					printf("2: NONEXT\n");
				}
#endif
				//HINT SEE before q->size -= SizeToCopy;
				/* The current head has been fully consumed: drop it. */
				q->first_pkt = RemoveFromQueue(q, pkt1);

				// Adjust timestamps
				pkt1 = q->first_pkt;
				if(pkt1) {
					/* Offset = ms of the new head already consumed
					 * (bytes / (freq * 2 bytes-per-sample * channels)). */
					int Offset=(dimAudioQ-SizeToCopy)*1000/(AudioSpecification.freq*2*AudioSpecification.channels);
					int64_t LastDts=pkt1->pkt.dts;
					pkt1->pkt.dts += Offset + deltaAudioQError;
					pkt1->pkt.pts += Offset + deltaAudioQError;
					deltaAudioQError = (float)LastDts + (float)Offset + deltaAudioQError - (float)pkt1->pkt.dts;
#ifdef DEBUG_QUEUE_DEEP
					printf(" Adjust timestamps Old = %lld New = %lld\n", LastDts, pkt1->pkt.dts);
#endif
					AudioQueueOffset = dimAudioQ - SizeToCopy;
					//SEE BEFORE HINT q->size -= AudioQueueOffset;
					ret = 1;

					ChunkerPlayerStats_UpdateAudioLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);
				}
				else {
					AudioQueueOffset=0;
				}
#ifdef DEBUG_QUEUE_DEEP
				printf(" deltaAudioQError = %f\n",deltaAudioQError);
#endif
				//update index of last frame extracted
				q->last_frame_extracted = pkt->stream_index;
			}
		}
	}
	else { //somebody requested a video packet, q is the video queue
		pkt1 = q->first_pkt;
		if(pkt1) {
#ifdef DEBUG_QUEUE_DEEP
			printf(" AV not 1\n");
#endif
			/* Field-by-field copy of the head packet's metadata, then its
			 * payload, then remove the node. */
			pkt->size = pkt1->pkt.size;
			pkt->dts = pkt1->pkt.dts;
			pkt->pts = pkt1->pkt.pts;
			pkt->stream_index = pkt1->pkt.stream_index;
			pkt->flags = pkt1->pkt.flags;
			pkt->pos = pkt1->pkt.pos;
			pkt->convergence_duration = pkt1->pkt.convergence_duration;
			//*pkt = pkt1->pkt;

			if((pkt->data != NULL) && (pkt1->pkt.data != NULL))
				memcpy(pkt->data, pkt1->pkt.data, pkt1->pkt.size);

			//HINT SEE BEFORE q->size -= pkt1->pkt.size;
			q->first_pkt = RemoveFromQueue(q, pkt1);

			ret = 1;

			ChunkerPlayerStats_UpdateVideoLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);

			//update index of last frame extracted
			q->last_frame_extracted = pkt->stream_index;
			last_video_frame_extracted = q->last_frame_extracted;
		}
#ifdef DEBUG_QUEUE
		else {
			printf(" VIDEO pk1 NULL!!!!\n");
		}
#endif
	}

	/* Audio queue drained: fall back into buffering mode. */
	if(q->nb_packets==0 && q->queueType==AUDIO) {
		QueueFillingMode=1;
#ifdef DEBUG_QUEUE
		printf("QUEUE: Get FillingMode ON\n");
#endif
	}
#ifdef DEBUG_QUEUE
	printf("QUEUE: Get Last %s Frame Extracted = %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif

	SDL_UnlockMutex(q->mutex);
	return ret;
}
626 |
|
627 |
/*
 * Fills audio_buf with the next block of decoded audio for the SDL
 * callback. Establishes the global audio/video clock offset (DeltaTime)
 * from the first packet's pts, skips packets that are already older than
 * Now-MAX_TOLLERANCE, and plays a packet whose adjusted pts falls within
 * +/- MAX_TOLLERANCE of Now.
 * Returns the number of bytes written, 0 if no packet was due, or -1
 * while buffering/stopped or when the queue runs dry mid-operation.
 * NOTE(review): buf_size is never checked against AudioPkt.size before
 * the memcpy — presumably audio_buf is always at least dimAudioQ bytes;
 * confirm against AudioCallback.
 */
int AudioDecodeFrame(uint8_t *audio_buf, int buf_size) {
	//struct timeval now;
	int audio_pkt_size = 0;
	int compressed_size = 0;
	long long Now;
	short int DecodeAudio=0, SkipAudio=0;
	//int len1, data_size;

	//gettimeofday(&now,NULL);
	//Now = (now.tv_sec)*1000+now.tv_usec/1000;
	Now=(long long)SDL_GetTicks();
	struct timeval now_tv;

	/* While buffering or paused, reset the clock baseline and bail out. */
	if(QueueFillingMode || QueueStopped)
	{
		//SDL_LockMutex(timing_mutex);
		FirstTimeAudio=1;
		FirstTime = 1;
		//SDL_UnlockMutex(timing_mutex);
		return -1;
	}

	/* First packet after (re)buffering: anchor wall-clock to stream pts. */
	if((FirstTime==1 || FirstTimeAudio==1) && audioq.size>0) {
		if(audioq.first_pkt->pkt.pts>0)
		{
			//SDL_LockMutex(timing_mutex);
			DeltaTime=Now-(long long)(audioq.first_pkt->pkt.pts);
			FirstTimeAudio = 0;
			FirstTime = 0;
			//SDL_UnlockMutex(timing_mutex);
#ifdef DEBUG_AUDIO
			printf("AUDIO: audio_decode_frame - DeltaTimeAudio=%lld\n",DeltaTime);
#endif
		}
	}

#ifdef DEBUG_AUDIO
	if(audioq.first_pkt)
	{
		printf("AUDIO: audio_decode_frame - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)audioq.first_pkt->pkt.pts,(long long)audioq.first_pkt->pkt.pts+DeltaTime);
		printf("AUDIO: QueueLen=%d ",(int)audioq.nb_packets);
		printf("AUDIO: QueueSize=%d\n",(int)audioq.size);
	}
	else
		printf("AUDIO: audio_decode_frame - Empty queue\n");
#endif

	gettimeofday(&now_tv, NULL);
	/* Classify the head packet: too late -> skip, on time -> decode,
	 * too early -> neither (leave it queued). */
	if(audioq.nb_packets>0)
	{
		if((long long)audioq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE)
		{
			SkipAudio = 1;
			DecodeAudio = 0;
		}
		else if((long long)audioq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE &&
			(long long)audioq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) {
			SkipAudio = 0;
			DecodeAudio = 1;
		}
	}

	/* Discard late packets one by one, re-evaluating the new head each
	 * time, until the head is on time or the queue empties. */
	while(SkipAudio==1 && audioq.size>0)
	{
		SkipAudio = 0;
#ifdef DEBUG_AUDIO
		printf("AUDIO: skipaudio: queue size=%d\n",audioq.size);
#endif
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
		if(audioq.first_pkt)
		{
			ChunkerPlayerStats_UpdateAudioSkipHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);

			if((long long)audioq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE)
			{
				SkipAudio = 1;
				DecodeAudio = 0;
			}
			else if((long long)audioq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE &&
				(long long)audioq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) {
				SkipAudio = 0;
				DecodeAudio = 1;
			}
		}
	}
	if(DecodeAudio==1) {
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
		/* Hand the already-decoded PCM bytes to the caller. */
		memcpy(audio_buf,AudioPkt.data,AudioPkt.size);
		audio_pkt_size = AudioPkt.size;
#ifdef DEBUG_AUDIO
		printf("AUDIO: Decode audio\n");
#endif

		ChunkerPlayerStats_UpdateAudioPlayedHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);
	}

	return audio_pkt_size;
}
729 |
|
730 |
int VideoCallback(void *valthread) |
731 |
{ |
732 |
//AVPacket pktvideo;
|
733 |
AVCodecContext *pCodecCtx; |
734 |
AVCodec *pCodec; |
735 |
AVFrame *pFrame; |
736 |
int frameFinished;
|
737 |
AVPicture pict; |
738 |
long long Now; |
739 |
short int SkipVideo, DecodeVideo; |
740 |
|
741 |
#ifdef SAVE_YUV
|
742 |
static AVFrame* lastSavedFrameBuffer = NULL; |
743 |
|
744 |
if(!lastSavedFrameBuffer)
|
745 |
lastSavedFrameBuffer = (AVFrame*) malloc(sizeof(AVFrame));
|
746 |
#endif
|
747 |
|
748 |
//double frame_rate = 0.0,time_between_frames=0.0;
|
749 |
//struct timeval now;
|
750 |
|
751 |
//int wait_for_sync = 1;
|
752 |
ThreadVal *tval; |
753 |
tval = (ThreadVal *)valthread; |
754 |
|
755 |
//frame_rate = tval->framerate;
|
756 |
//time_between_frames = 1.e6 / frame_rate;
|
757 |
//gettimeofday(&time_now,0);
|
758 |
|
759 |
//frecon = fopen("recondechunk.mpg","wb");
|
760 |
|
761 |
pCodecCtx=avcodec_alloc_context(); |
762 |
pCodecCtx->codec_type = CODEC_TYPE_VIDEO; |
763 |
//pCodecCtx->debug = FF_DEBUG_DCT_COEFF;
|
764 |
#ifdef H264_VIDEO_ENCODER
|
765 |
pCodecCtx->codec_id = CODEC_ID_H264; |
766 |
pCodecCtx->me_range = 16;
|
767 |
pCodecCtx->max_qdiff = 4;
|
768 |
pCodecCtx->qmin = 1;
|
769 |
pCodecCtx->qmax = 30;
|
770 |
pCodecCtx->qcompress = 0.6; |
771 |
#else
|
772 |
pCodecCtx->codec_id = CODEC_ID_MPEG4; |
773 |
#endif
|
774 |
//pCodecCtx->bit_rate = 400000;
|
775 |
// resolution must be a multiple of two
|
776 |
pCodecCtx->width = tval->width;//176;//352;
|
777 |
pCodecCtx->height = tval->height;//144;//288;
|
778 |
|
779 |
// frames per second
|
780 |
//pCodecCtx->time_base = (AVRational){1,25};
|
781 |
//pCodecCtx->gop_size = 10; // emit one intra frame every ten frames
|
782 |
//pCodecCtx->max_b_frames=1;
|
783 |
pCodecCtx->pix_fmt = PIX_FMT_YUV420P; |
784 |
pCodec=avcodec_find_decoder(pCodecCtx->codec_id); |
785 |
|
786 |
if(pCodec==NULL) { |
787 |
fprintf(stderr, "Unsupported codec!\n");
|
788 |
return -1; // Codec not found |
789 |
} |
790 |
if(avcodec_open(pCodecCtx, pCodec) < 0) { |
791 |
fprintf(stderr, "could not open codec\n");
|
792 |
return -1; // Could not open codec |
793 |
} |
794 |
pFrame=avcodec_alloc_frame(); |
795 |
if(pFrame==NULL) { |
796 |
printf("Memory error!!!\n");
|
797 |
return -1; |
798 |
} |
799 |
|
800 |
#ifdef DEBUG_VIDEO
|
801 |
printf("VIDEO: video_callback entering main cycle\n");
|
802 |
#endif
|
803 |
|
804 |
struct timeval now_tv;
|
805 |
while(AVPlaying && !quit) {
|
806 |
if(QueueFillingMode || QueueStopped)
|
807 |
{ |
808 |
//SDL_LockMutex(timing_mutex);
|
809 |
FirstTime = 1;
|
810 |
//SDL_UnlockMutex(timing_mutex);
|
811 |
usleep(5000);
|
812 |
continue;
|
813 |
} |
814 |
|
815 |
DecodeVideo = 0;
|
816 |
SkipVideo = 0;
|
817 |
Now=(long long)SDL_GetTicks(); |
818 |
if(FirstTime==1 && videoq.size>0) { |
819 |
if(videoq.first_pkt->pkt.pts>0) |
820 |
{ |
821 |
//SDL_LockMutex(timing_mutex);
|
822 |
DeltaTime=Now-(long long)videoq.first_pkt->pkt.pts; |
823 |
FirstTime = 0;
|
824 |
//SDL_UnlockMutex(timing_mutex);
|
825 |
} |
826 |
#ifdef DEBUG_VIDEO
|
827 |
printf("VIDEO: VideoCallback - DeltaTimeAudio=%lld\n",DeltaTime);
|
828 |
#endif
|
829 |
} |
830 |
|
831 |
#ifdef DEBUG_VIDEO
|
832 |
if(videoq.first_pkt)
|
833 |
{ |
834 |
printf("VIDEO: VideoCallback - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)videoq.first_pkt->pkt.pts,(long long)videoq.first_pkt->pkt.pts+DeltaTime); |
835 |
printf("VIDEO: Index=%d ", (int)videoq.first_pkt->pkt.stream_index); |
836 |
printf("VIDEO: QueueLen=%d ", (int)videoq.nb_packets); |
837 |
printf("VIDEO: QueueSize=%d\n", (int)videoq.size); |
838 |
} |
839 |
else
|
840 |
printf("VIDEO: VideoCallback - Empty queue\n");
|
841 |
#endif
|
842 |
|
843 |
if(videoq.nb_packets>0) { |
844 |
if(((long long)videoq.first_pkt->pkt.pts+DeltaTime)<Now-(long long)MAX_TOLLERANCE) |
845 |
{ |
846 |
SkipVideo = 1;
|
847 |
DecodeVideo = 0;
|
848 |
} |
849 |
else
|
850 |
if(((long long)videoq.first_pkt->pkt.pts+DeltaTime)>=Now-(long long)MAX_TOLLERANCE && |
851 |
((long long)videoq.first_pkt->pkt.pts+DeltaTime)<=Now+(long long)MAX_TOLLERANCE) { |
852 |
SkipVideo = 0;
|
853 |
DecodeVideo = 1;
|
854 |
} |
855 |
|
856 |
// else (i.e. videoq.first_pkt->pkt.pts+DeltaTime>Now+MAX_TOLLERANCE)
|
857 |
// do nothing and continue
|
858 |
} |
859 |
#ifdef DEBUG_VIDEO
|
860 |
printf("VIDEO: skipvideo:%d decodevideo:%d\n",SkipVideo,DecodeVideo);
|
861 |
#endif
|
862 |
gettimeofday(&now_tv, NULL);
|
863 |
|
864 |
while(SkipVideo==1 && videoq.size>0) |
865 |
{ |
866 |
SkipVideo = 0;
|
867 |
#ifdef DEBUG_VIDEO
|
868 |
printf("VIDEO: Skip Video\n");
|
869 |
#endif
|
870 |
if(PacketQueueGet(&videoq,&VideoPkt,0, NULL) < 0) { |
871 |
break;
|
872 |
} |
873 |
|
874 |
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt); |
875 |
|
876 |
// sometimes assertion fails, maybe the decoder change the frame type
|
877 |
//~ if(LastSourceIFrameDistance == 0)
|
878 |
//~ assert(pFrame->pict_type == 1);
|
879 |
|
880 |
if(videoq.first_pkt)
|
881 |
{ |
882 |
if((long long)videoq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE) |
883 |
{ |
884 |
SkipVideo = 1;
|
885 |
DecodeVideo = 0;
|
886 |
} |
887 |
else if((long long)videoq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE && |
888 |
(long long)videoq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) { |
889 |
SkipVideo = 0;
|
890 |
DecodeVideo = 1;
|
891 |
} |
892 |
} |
893 |
|
894 |
ChunkerPlayerStats_UpdateVideoSkipHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame); |
895 |
|
896 |
/*if(pFrame->pict_type == 1)
|
897 |
{
|
898 |
int i1;
|
899 |
// every 23 items (23 is the qstride field in the AVFrame struct) there is 1 zero.
|
900 |
// 396/23 = 17 => 396 macroblocks + 17 zeros = 413 items
|
901 |
for(i1=0; i1< 413; i1++)
|
902 |
fprintf(qscaletable_file, "%d\t", (int)pFrame->qscale_table[i1]);
|
903 |
fprintf(qscaletable_file, "\n");
|
904 |
}*/
|
905 |
|
906 |
//ChunkerPlayerStats_UpdateVideoPlayedHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size);
|
907 |
} |
908 |
|
909 |
if(DecodeVideo==1) { |
910 |
if(PacketQueueGet(&videoq,&VideoPkt,0, NULL) > 0) { |
911 |
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt); |
912 |
|
913 |
if(frameFinished)
|
914 |
{ // it must be true all the time else error
|
915 |
#ifdef DEBUG_VIDEO
|
916 |
printf("VIDEO: FrameFinished\n");
|
917 |
#endif
|
918 |
decoded_vframes++; |
919 |
|
920 |
// sometimes assertion fails, maybe the decoder change the frame type
|
921 |
//~ if(LastSourceIFrameDistance == 0)
|
922 |
//~ assert(pFrame->pict_type == 1);
|
923 |
#ifdef SAVE_YUV
|
924 |
if(LastSavedVFrame == -1) |
925 |
{ |
926 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
927 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
928 |
LastSavedVFrame = VideoPkt.stream_index; |
929 |
} |
930 |
else if(LastSavedVFrame == (VideoPkt.stream_index-1)) |
931 |
{ |
932 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
933 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
934 |
LastSavedVFrame = VideoPkt.stream_index; |
935 |
} |
936 |
else if(LastSavedVFrame >= 0) |
937 |
{ |
938 |
while(LastSavedVFrame < (VideoPkt.stream_index-1)) |
939 |
{ |
940 |
SaveFrame(lastSavedFrameBuffer, pCodecCtx->width, pCodecCtx->height); |
941 |
} |
942 |
|
943 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
944 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
945 |
LastSavedVFrame = VideoPkt.stream_index; |
946 |
} |
947 |
#endif
|
948 |
ChunkerPlayerStats_UpdateVideoPlayedHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame); |
949 |
|
950 |
if(SilentMode)
|
951 |
continue;
|
952 |
|
953 |
// Lock SDL_yuv_overlay
|
954 |
if(SDL_MUSTLOCK(MainScreen)) {
|
955 |
if(SDL_LockSurface(MainScreen) < 0) { |
956 |
continue;
|
957 |
} |
958 |
} |
959 |
|
960 |
if(SDL_LockYUVOverlay(YUVOverlay) < 0) { |
961 |
if(SDL_MUSTLOCK(MainScreen)) {
|
962 |
SDL_UnlockSurface(MainScreen); |
963 |
} |
964 |
continue;
|
965 |
} |
966 |
|
967 |
pict.data[0] = YUVOverlay->pixels[0]; |
968 |
pict.data[1] = YUVOverlay->pixels[2]; |
969 |
pict.data[2] = YUVOverlay->pixels[1]; |
970 |
|
971 |
pict.linesize[0] = YUVOverlay->pitches[0]; |
972 |
pict.linesize[1] = YUVOverlay->pitches[2]; |
973 |
pict.linesize[2] = YUVOverlay->pitches[1]; |
974 |
|
975 |
if(img_convert_ctx == NULL) { |
976 |
img_convert_ctx = sws_getContext(tval->width, tval->height, PIX_FMT_YUV420P, InitRect->w, InitRect->h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); |
977 |
if(img_convert_ctx == NULL) { |
978 |
fprintf(stderr, "Cannot initialize the conversion context!\n");
|
979 |
exit(1);
|
980 |
} |
981 |
} |
982 |
|
983 |
#ifdef VIDEO_DEINTERLACE
|
984 |
avpicture_deinterlace( |
985 |
(AVPicture*) pFrame, |
986 |
(const AVPicture*) pFrame,
|
987 |
pCodecCtx->pix_fmt, |
988 |
tval->width, tval->height); |
989 |
#endif
|
990 |
|
991 |
// let's draw the data (*yuv[3]) on a SDL screen (*screen)
|
992 |
sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, tval->height, pict.data, pict.linesize);
|
993 |
SDL_UnlockYUVOverlay(YUVOverlay); |
994 |
// Show, baby, show!
|
995 |
SDL_LockMutex(OverlayMutex); |
996 |
SDL_DisplayYUVOverlay(YUVOverlay, &OverlayRect); |
997 |
SDL_UnlockMutex(OverlayMutex); |
998 |
|
999 |
//redisplay logo
|
1000 |
/**SDL_BlitSurface(image, NULL, MainScreen, &dest);*/
|
1001 |
/* Update the screen area just changed */
|
1002 |
/**SDL_UpdateRects(MainScreen, 1, &dest);*/
|
1003 |
|
1004 |
if(SDL_MUSTLOCK(MainScreen)) {
|
1005 |
SDL_UnlockSurface(MainScreen); |
1006 |
} |
1007 |
} //if FrameFinished
|
1008 |
else
|
1009 |
{ |
1010 |
ChunkerPlayerStats_UpdateVideoLossHistory(&(videoq.PacketHistory), VideoPkt.stream_index+1, videoq.last_frame_extracted-1); |
1011 |
} |
1012 |
} // if packet_queue_get
|
1013 |
} //if DecodeVideo=1
|
1014 |
|
1015 |
usleep(5000);
|
1016 |
} |
1017 |
avcodec_close(pCodecCtx); |
1018 |
av_free(pCodecCtx); |
1019 |
av_free(pFrame); |
1020 |
//fclose(frecon);
|
1021 |
#ifdef DEBUG_VIDEO
|
1022 |
printf("VIDEO: video callback end\n");
|
1023 |
#endif
|
1024 |
|
1025 |
#ifdef SAVE_YUV
|
1026 |
if(!lastSavedFrameBuffer)
|
1027 |
free(lastSavedFrameBuffer); |
1028 |
|
1029 |
lastSavedFrameBuffer = NULL;
|
1030 |
#endif
|
1031 |
|
1032 |
return 0; |
1033 |
} |
1034 |
|
1035 |
/*
 * SDL audio callback: fill `stream` with `len` bytes of decoded audio.
 * Decodes one frame via AudioDecodeFrame(); if the decoded size does not
 * match exactly what SDL asked for, the buffer is zeroed (silence) instead.
 * When SilentMode >= 2 the output buffer is left untouched entirely.
 */
void AudioCallback(void *userdata, Uint8 *stream, int len)
{
	/* static: persists across callbacks, avoids a large per-call stack frame */
	static uint8_t decode_buffer[AVCODEC_MAX_AUDIO_FRAME_SIZE];

	int decoded_bytes = AudioDecodeFrame(decode_buffer, sizeof(decode_buffer));

	if(SilentMode >= 2)
		return;

	if(decoded_bytes == len)
		memcpy(stream, decode_buffer, len);
	else
		memset(stream, 0, len);	/* size mismatch: play silence */
}
1051 |
|
1052 |
/*
 * Append one raw YUV 4:2:0 frame to the file named by the global YUVFileName.
 * Planes are written row by row (Y full resolution, then U and V at half
 * resolution) honoring each plane's linesize stride. Silently returns if the
 * file cannot be opened.
 */
void SaveFrame(AVFrame *pFrame, int width, int height)
{
	FILE *out = fopen(YUVFileName, "ab");
	if(out == NULL)
		return;

	int row;

	/* luma plane: width x height */
	for(row = 0; row < height; row++)
		fwrite(pFrame->data[0] + row * pFrame->linesize[0], 1, width, out);

	/* chroma planes: (width/2) x (height/2) each, per 4:2:0 subsampling */
	for(row = 0; row < height / 2; row++)
		fwrite(pFrame->data[1] + row * pFrame->linesize[1], 1, width / 2, out);
	for(row = 0; row < height / 2; row++)
		fwrite(pFrame->data[2] + row * pFrame->linesize[2], 1, width / 2, out);

	fclose(out);
}
1078 |
|
1079 |
/* Returns nonzero while playback is active (value of the AVPlaying flag). */
int ChunkerPlayerCore_IsRunning()
{
	return AVPlaying;
}
1083 |
|
1084 |
void ChunkerPlayerCore_Play()
|
1085 |
{ |
1086 |
if(AVPlaying) return; |
1087 |
AVPlaying = 1;
|
1088 |
|
1089 |
SDL_PauseAudio(0);
|
1090 |
video_thread = SDL_CreateThread(VideoCallback, &VideoCallbackThreadParams); |
1091 |
ChunkerPlayerStats_Init(); |
1092 |
stats_thread = SDL_CreateThread(CollectStatisticsThread, NULL);
|
1093 |
|
1094 |
decoded_vframes = 0;
|
1095 |
LastSavedVFrame = -1;
|
1096 |
} |
1097 |
|
1098 |
void ChunkerPlayerCore_Stop()
|
1099 |
{ |
1100 |
if(!AVPlaying) return; |
1101 |
|
1102 |
AVPlaying = 0;
|
1103 |
|
1104 |
// Stop audio&video playback
|
1105 |
SDL_WaitThread(video_thread, NULL);
|
1106 |
SDL_WaitThread(stats_thread, NULL);
|
1107 |
SDL_PauseAudio(1);
|
1108 |
SDL_CloseAudio(); |
1109 |
|
1110 |
if(YUVOverlay != NULL) |
1111 |
{ |
1112 |
SDL_FreeYUVOverlay(YUVOverlay); |
1113 |
YUVOverlay = NULL;
|
1114 |
} |
1115 |
|
1116 |
PacketQueueReset(&audioq); |
1117 |
PacketQueueReset(&videoq); |
1118 |
|
1119 |
avcodec_close(aCodecCtx); |
1120 |
av_free(aCodecCtx); |
1121 |
free(AudioPkt.data); |
1122 |
free(VideoPkt.data); |
1123 |
free(outbuf_audio); |
1124 |
free(InitRect); |
1125 |
|
1126 |
/*
|
1127 |
* Sleep two buffers' worth of audio before closing, in order
|
1128 |
* to allow the playback to finish. This isn't always enough;
|
1129 |
* perhaps SDL needs a way to explicitly wait for device drain?
|
1130 |
*/
|
1131 |
int delay = 2 * 1000 * CurrentAudioSamples / CurrentAudioFreq; |
1132 |
// printf("SDL_Delay(%d)\n", delay*10);
|
1133 |
SDL_Delay(delay*10);
|
1134 |
} |
1135 |
|
1136 |
void ChunkerPlayerCore_Pause()
|
1137 |
{ |
1138 |
if(!AVPlaying) return; |
1139 |
|
1140 |
AVPlaying = 0;
|
1141 |
|
1142 |
// Stop audio&video playback
|
1143 |
SDL_WaitThread(video_thread, NULL);
|
1144 |
SDL_PauseAudio(1);
|
1145 |
|
1146 |
PacketQueueReset(&audioq); |
1147 |
PacketQueueReset(&videoq); |
1148 |
} |
1149 |
|
1150 |
int ChunkerPlayerCore_AudioEnded()
|
1151 |
{ |
1152 |
return (audioq.nb_packets==0 && audioq.last_frame_extracted>0); |
1153 |
} |
1154 |
|
1155 |
/* Flush both the audio and video packet queues (drops all queued packets). */
void ChunkerPlayerCore_ResetAVQueues()
{
#ifdef DEBUG_QUEUE
	printf("QUEUE: MAIN SHOULD RESET\n");
#endif
	PacketQueueReset(&audioq);
	PacketQueueReset(&videoq);
}
1163 |
|
1164 |
int ChunkerPlayerCore_EnqueueBlocks(const uint8_t *block, const int block_size) |
1165 |
{ |
1166 |
#ifdef EMULATE_CHUNK_LOSS
|
1167 |
static time_t loss_cycle_start_time = 0, now = 0; |
1168 |
static int early_losses = 0; |
1169 |
static int clp_frames = 0; |
1170 |
|
1171 |
if(ScheduledChunkLosses)
|
1172 |
{ |
1173 |
static unsigned int random_threshold; |
1174 |
now=time(NULL);
|
1175 |
if(!loss_cycle_start_time)
|
1176 |
loss_cycle_start_time = now; |
1177 |
|
1178 |
if(((now-loss_cycle_start_time) >= ScheduledChunkLosses[((CurrChunkLossIndex+1)%NScheduledChunkLosses)].Time) && (NScheduledChunkLosses>1 || CurrChunkLossIndex==-1)) |
1179 |
{ |
1180 |
CurrChunkLossIndex = ((CurrChunkLossIndex+1)%NScheduledChunkLosses);
|
1181 |
if(CurrChunkLossIndex == (NScheduledChunkLosses-1)) |
1182 |
loss_cycle_start_time = now; |
1183 |
|
1184 |
if(ScheduledChunkLosses[CurrChunkLossIndex].Value == -1) |
1185 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].MinValue + (rand() % (ScheduledChunkLosses[CurrChunkLossIndex].MaxValue-ScheduledChunkLosses[CurrChunkLossIndex].MinValue)); |
1186 |
else
|
1187 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].Value; |
1188 |
|
1189 |
printf("new ScheduledChunkLoss, time: %d, value: %d\n", (int)ScheduledChunkLosses[CurrChunkLossIndex].Time, random_threshold); |
1190 |
} |
1191 |
|
1192 |
if(clp_frames > 0) |
1193 |
{ |
1194 |
clp_frames--; |
1195 |
return PLAYER_FAIL_RETURN;
|
1196 |
} |
1197 |
if((rand() % 100) < random_threshold) |
1198 |
{ |
1199 |
if(early_losses > 0) |
1200 |
early_losses--; |
1201 |
else
|
1202 |
{ |
1203 |
clp_frames=early_losses=(ScheduledChunkLosses[CurrChunkLossIndex].Burstiness-1);
|
1204 |
return PLAYER_FAIL_RETURN;
|
1205 |
} |
1206 |
} |
1207 |
} |
1208 |
#endif
|
1209 |
|
1210 |
Chunk *gchunk = NULL;
|
1211 |
int decoded_size = -1; |
1212 |
uint8_t *tempdata, *buffer; |
1213 |
int j;
|
1214 |
Frame *frame = NULL;
|
1215 |
AVPacket packet, packetaudio; |
1216 |
|
1217 |
uint16_t *audio_bufQ = NULL;
|
1218 |
|
1219 |
//the frame.h gets encoded into 5 slots of 32bits (3 ints plus 2 more for the timeval struct
|
1220 |
static int sizeFrameHeader = 5*sizeof(int32_t); |
1221 |
static int ExternalChunk_header_size = 5*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 1*CHUNK_TRANSCODING_INT_SIZE*2; |
1222 |
|
1223 |
static int chunks_out_of_order = 0; |
1224 |
static int last_chunk_id = -1; |
1225 |
|
1226 |
audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
1227 |
if(!audio_bufQ) {
|
1228 |
printf("Memory error in audio_bufQ!\n");
|
1229 |
return PLAYER_FAIL_RETURN;
|
1230 |
} |
1231 |
|
1232 |
gchunk = (Chunk *)malloc(sizeof(Chunk));
|
1233 |
if(!gchunk) {
|
1234 |
printf("Memory error in gchunk!\n");
|
1235 |
av_free(audio_bufQ); |
1236 |
return PLAYER_FAIL_RETURN;
|
1237 |
} |
1238 |
|
1239 |
decoded_size = decodeChunk(gchunk, block, block_size); |
1240 |
|
1241 |
if(last_chunk_id == -1) |
1242 |
last_chunk_id = gchunk->id; |
1243 |
|
1244 |
if(gchunk->id > (last_chunk_id+1)) { |
1245 |
chunks_out_of_order += gchunk->id - last_chunk_id - 1;
|
1246 |
} |
1247 |
last_chunk_id = gchunk->id; |
1248 |
|
1249 |
#ifdef DEBUG_CHUNKER
|
1250 |
printf("CHUNKER: enqueueBlock: id %d decoded_size %d target size %d - out_of_order %d\n", gchunk->id, decoded_size, GRAPES_ENCODED_CHUNK_HEADER_SIZE + ExternalChunk_header_size + gchunk->size, chunks_out_of_order);
|
1251 |
#endif
|
1252 |
if(decoded_size < 0) { |
1253 |
//HINT here i should differentiate between various return values of the decode
|
1254 |
//in order to free what has been allocated there
|
1255 |
printf("chunk probably corrupted!\n");
|
1256 |
av_free(audio_bufQ); |
1257 |
free(gchunk); |
1258 |
return PLAYER_FAIL_RETURN;
|
1259 |
} |
1260 |
|
1261 |
frame = (Frame *)malloc(sizeof(Frame));
|
1262 |
if(!frame) {
|
1263 |
printf("Memory error in Frame!\n");
|
1264 |
if(gchunk) {
|
1265 |
if(gchunk->attributes) {
|
1266 |
free(gchunk->attributes); |
1267 |
} |
1268 |
free(gchunk); |
1269 |
} |
1270 |
av_free(audio_bufQ); |
1271 |
return PLAYER_FAIL_RETURN;
|
1272 |
} |
1273 |
|
1274 |
tempdata = gchunk->data; //let it point to first frame of payload
|
1275 |
j=gchunk->size; |
1276 |
while(j>0 && !quit) { |
1277 |
frame->number = bit32_encoded_pull(tempdata); |
1278 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1279 |
frame->timestamp.tv_sec = bit32_encoded_pull(tempdata); |
1280 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1281 |
frame->timestamp.tv_usec = bit32_encoded_pull(tempdata); |
1282 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1283 |
frame->size = bit32_encoded_pull(tempdata); |
1284 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1285 |
frame->type = bit32_encoded_pull(tempdata); |
1286 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1287 |
|
1288 |
buffer = tempdata; // here coded frame information
|
1289 |
tempdata += frame->size; //let it point to the next frame
|
1290 |
|
1291 |
if(frame->type < 5) { // video frame |
1292 |
av_init_packet(&packet); |
1293 |
packet.data = buffer;//video_bufQ;
|
1294 |
packet.size = frame->size; |
1295 |
packet.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1296 |
packet.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1297 |
packet.stream_index = frame->number; // use of stream_index for number frame
|
1298 |
//packet.duration = frame->timestamp.tv_sec;
|
1299 |
if(packet.size > 0) |
1300 |
ChunkerPlayerCore_PacketQueuePut(&videoq, &packet); //the _put makes a copy of the packet
|
1301 |
|
1302 |
#ifdef DEBUG_SOURCE
|
1303 |
printf("SOURCE: Insert video in queue pts=%lld %d %d sindex:%d\n",packet.pts,(int)frame->timestamp.tv_sec,(int)frame->timestamp.tv_usec,packet.stream_index); |
1304 |
#endif
|
1305 |
} |
1306 |
else if(frame->type == 5) { // audio frame |
1307 |
av_init_packet(&packetaudio); |
1308 |
packetaudio.data = buffer; |
1309 |
packetaudio.size = frame->size; |
1310 |
packetaudio.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1311 |
packetaudio.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1312 |
//packetaudio.duration = frame->timestamp.tv_sec;
|
1313 |
packetaudio.stream_index = frame->number; // use of stream_index for number frame
|
1314 |
packetaudio.flags = 1;
|
1315 |
packetaudio.pos = -1;
|
1316 |
|
1317 |
//instead of -1, in order to signal it is not decoded yet
|
1318 |
packetaudio.convergence_duration = 0;
|
1319 |
|
1320 |
// insert the audio frame into the queue
|
1321 |
if(packetaudio.size > 0) |
1322 |
ChunkerPlayerCore_PacketQueuePut(&audioq, &packetaudio);//makes a copy of the packet so i can free here
|
1323 |
|
1324 |
#ifdef DEBUG_SOURCE
|
1325 |
printf("SOURCE: Insert audio in queue pts=%lld sindex:%d\n", packetaudio.pts, packetaudio.stream_index);
|
1326 |
#endif
|
1327 |
} |
1328 |
else {
|
1329 |
printf("SOURCE: Unknown frame type %d. Size %d\n", frame->type, frame->size);
|
1330 |
} |
1331 |
if(frame->size > 0) |
1332 |
j = j - sizeFrameHeader - frame->size; |
1333 |
else {
|
1334 |
printf("SOURCE: Corrupt frames (size %d) in chunk. Skipping it...\n", frame->size);
|
1335 |
j = -1;
|
1336 |
} |
1337 |
} |
1338 |
//chunk ingestion terminated!
|
1339 |
if(gchunk) {
|
1340 |
if(gchunk->attributes) {
|
1341 |
free(gchunk->attributes); |
1342 |
} |
1343 |
if(gchunk->data)
|
1344 |
free(gchunk->data); |
1345 |
free(gchunk); |
1346 |
} |
1347 |
if(frame)
|
1348 |
free(frame); |
1349 |
if(audio_bufQ)
|
1350 |
av_free(audio_bufQ); |
1351 |
|
1352 |
return PLAYER_OK_RETURN;
|
1353 |
} |
1354 |
|
1355 |
void ChunkerPlayerCore_SetupOverlay(int width, int height) |
1356 |
{ |
1357 |
// if(!MainScreen && !SilentMode)
|
1358 |
// {
|
1359 |
// printf("Cannot find main screen, exiting...\n");
|
1360 |
// exit(1);
|
1361 |
// }
|
1362 |
|
1363 |
if(SilentMode)
|
1364 |
return;
|
1365 |
|
1366 |
SDL_LockMutex(OverlayMutex); |
1367 |
if(YUVOverlay != NULL) |
1368 |
{ |
1369 |
SDL_FreeYUVOverlay(YUVOverlay); |
1370 |
YUVOverlay = NULL;
|
1371 |
} |
1372 |
|
1373 |
// create video overlay for display of video frames
|
1374 |
// printf("SDL_CreateYUVOverlay(%d, %d, SDL_YV12_OVERLAY, MainScreen)\n", width, height);
|
1375 |
YUVOverlay = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, MainScreen); |
1376 |
// YUVOverlay = SDL_CreateYUVOverlay(OverlayRect.w, OverlayRect.h, SDL_YV12_OVERLAY, MainScreen);
|
1377 |
if ( YUVOverlay == NULL ) |
1378 |
{ |
1379 |
fprintf(stderr,"SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
|
1380 |
exit(1);
|
1381 |
} |
1382 |
|
1383 |
if ( YUVOverlay->hw_overlay )
|
1384 |
fprintf(stderr,"SDL: Using hardware overlay.\n");
|
1385 |
// OverlayRect.x = (screen_w - width) / 2;
|
1386 |
|
1387 |
SDL_DisplayYUVOverlay(YUVOverlay, &OverlayRect); |
1388 |
|
1389 |
SDL_UnlockMutex(OverlayMutex); |
1390 |
} |
1391 |
|
1392 |
int CollectStatisticsThread(void *params) |
1393 |
{ |
1394 |
struct timeval last_stats_evaluation, now, last_trace, last_qoe_evaluation;
|
1395 |
gettimeofday(&last_stats_evaluation, NULL);
|
1396 |
last_trace = last_stats_evaluation; |
1397 |
last_qoe_evaluation = last_stats_evaluation; |
1398 |
|
1399 |
double video_qdensity;
|
1400 |
double audio_qdensity;
|
1401 |
char audio_stats_text[255]; |
1402 |
char video_stats_text[255]; |
1403 |
int loss_changed = 0; |
1404 |
int density_changed = 0; |
1405 |
SStats audio_statistics, video_statistics; |
1406 |
double qoe = 0; |
1407 |
int sleep_time = STATS_THREAD_GRANULARITY*1000; |
1408 |
|
1409 |
while(AVPlaying && !quit)
|
1410 |
{ |
1411 |
usleep(sleep_time); |
1412 |
|
1413 |
gettimeofday(&now, NULL);
|
1414 |
|
1415 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_stats_evaluation.tv_sec*1000)+(last_stats_evaluation.tv_usec/1000))) > GUI_PRINTSTATS_INTERVAL) |
1416 |
{ |
1417 |
// estimate audio queue stats
|
1418 |
int audio_stats_changed = ChunkerPlayerStats_GetStats(&(audioq.PacketHistory), &audio_statistics);
|
1419 |
|
1420 |
// estimate video queue stats
|
1421 |
int video_stats_changed = ChunkerPlayerStats_GetStats(&(videoq.PacketHistory), &video_statistics);
|
1422 |
|
1423 |
#ifdef DEBUG_STATS
|
1424 |
printf("VIDEO: %d Kbit/sec; ", video_statistics.Bitrate);
|
1425 |
printf("AUDIO: %d Kbit/sec\n", audio_statistics.Bitrate);
|
1426 |
#endif
|
1427 |
|
1428 |
// QUEUE DENSITY EVALUATION
|
1429 |
if((audioq.last_pkt != NULL) && (audioq.first_pkt != NULL)) |
1430 |
if(audioq.last_pkt->pkt.stream_index >= audioq.first_pkt->pkt.stream_index)
|
1431 |
{ |
1432 |
//plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1433 |
audio_qdensity = (double)audioq.nb_packets / (double)(audioq.last_pkt->pkt.stream_index - audioq.first_pkt->pkt.stream_index + 1) * 100.0; |
1434 |
} |
1435 |
|
1436 |
if((videoq.last_pkt != NULL) && (videoq.first_pkt != NULL)) |
1437 |
if(videoq.last_pkt->pkt.stream_index >= videoq.first_pkt->pkt.stream_index)
|
1438 |
{ |
1439 |
// plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1440 |
video_qdensity = (double)videoq.nb_packets / (double)(videoq.last_pkt->pkt.stream_index - videoq.first_pkt->pkt.stream_index + 1) * 100.0; |
1441 |
} |
1442 |
|
1443 |
if(LogTraces)
|
1444 |
{ |
1445 |
ChunkerPlayerStats_PrintHistoryTrace(&(audioq.PacketHistory), AudioTraceFilename); |
1446 |
ChunkerPlayerStats_PrintHistoryTrace(&(videoq.PacketHistory), VideoTraceFilename); |
1447 |
|
1448 |
//if(SilentMode != 1 && SilentMode != 2)
|
1449 |
ChunkerPlayerStats_PrintContextFile(); |
1450 |
} |
1451 |
|
1452 |
// PRINT STATISTICS ON GUI
|
1453 |
if(!Audio_ON)
|
1454 |
sprintf(audio_stats_text, "AUDIO MUTED");
|
1455 |
else if(audio_stats_changed) |
1456 |
sprintf(audio_stats_text, "[AUDIO] qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)", (int)audio_qdensity, (int)audio_statistics.Lossrate, audioq.PacketHistory.LostCount, audio_statistics.Skiprate, audioq.PacketHistory.SkipCount); |
1457 |
else
|
1458 |
sprintf(audio_stats_text, "waiting for incoming audio packets...");
|
1459 |
|
1460 |
if(video_stats_changed)
|
1461 |
{ |
1462 |
char est_psnr_string[255]; |
1463 |
sprintf(est_psnr_string, "");
|
1464 |
if(qoe)
|
1465 |
{ |
1466 |
sprintf(est_psnr_string, " - Est. Mean PSNR: %.1f db", (float)qoe); |
1467 |
#ifdef PSNR_PUBLICATION
|
1468 |
// Publish measure into repository
|
1469 |
if(RepoAddress[0]!='\0') |
1470 |
{ |
1471 |
MeasurementRecord r; |
1472 |
r.originator = NetworkID; |
1473 |
r.targetA = NetworkID; |
1474 |
r.targetB = NULL;
|
1475 |
r.published_name = "PSNR_MEAN";
|
1476 |
r.value = qoe; |
1477 |
r.string_value = NULL;
|
1478 |
r.channel = Channels[SelectedChannel].Title; |
1479 |
gettimeofday(&(r.timestamp), NULL);
|
1480 |
// One update every REPO_UPDATE_INTERVALL seconds
|
1481 |
struct timeval ElapsedTime;
|
1482 |
timeval_subtract(&(r.timestamp),&LastTimeRepoPublish,&ElapsedTime); |
1483 |
if(ElapsedTime.tv_sec>=PSNR_REPO_UPDATE_INTERVALL)
|
1484 |
{ |
1485 |
LastTimeRepoPublish=r.timestamp; |
1486 |
if(repPublish(repoclient,NULL,NULL,&r)!=NULL) { |
1487 |
#ifdef DEBUG_PSNR
|
1488 |
printf("PSNR publish: %s %e %s\n",r.originator,qoe,r.channel);
|
1489 |
#endif
|
1490 |
} |
1491 |
} |
1492 |
} |
1493 |
#endif
|
1494 |
} |
1495 |
|
1496 |
sprintf(video_stats_text, "[VIDEO] qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)%s", (int)video_qdensity, video_statistics.Lossrate, videoq.PacketHistory.LostCount, video_statistics.Skiprate, videoq.PacketHistory.SkipCount, est_psnr_string); |
1497 |
} |
1498 |
else
|
1499 |
sprintf(video_stats_text, "waiting for incoming video packets...");
|
1500 |
|
1501 |
if(qoe)
|
1502 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,(qoe>LED_THRS_YELLOW?LED_GREEN:((qoe<=LED_THRS_YELLOW && qoe>LED_THRS_RED)?LED_YELLOW:LED_RED))); |
1503 |
else
|
1504 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,LED_GREEN); |
1505 |
|
1506 |
|
1507 |
last_stats_evaluation = now; |
1508 |
} |
1509 |
|
1510 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_qoe_evaluation.tv_sec*1000)+(last_qoe_evaluation.tv_usec/1000))) > EVAL_QOE_INTERVAL) |
1511 |
{ |
1512 |
// ESTIMATE QoE
|
1513 |
ChunkerPlayerStats_GetMeanVideoQuality(&(videoq.PacketHistory), &qoe); |
1514 |
|
1515 |
#ifdef DEBUG_STATS
|
1516 |
printf("QoE index: %f\n", (float) qoe); |
1517 |
#endif
|
1518 |
last_qoe_evaluation = now; |
1519 |
} |
1520 |
} |
1521 |
} |