chunker-player / chunker_player / player_core.c @ f77cdd7e
History | View | Annotate | Download (49.4 KB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
|
3 |
* Copyright (c) 2010-2011 Csaba Kiraly
|
4 |
* developed for the Napa-Wine EU project. See www.napa-wine.eu
|
5 |
*
|
6 |
* This is free software; see lgpl-2.1.txt
|
7 |
*/
|
8 |
|
9 |
#include <libavcodec/avcodec.h> |
10 |
#include <libavformat/avformat.h> |
11 |
#include <libswscale/swscale.h> |
12 |
|
13 |
#include <stdio.h> |
14 |
#include <unistd.h> |
15 |
#include "external_chunk_transcoding.h" |
16 |
#include "frame.h" |
17 |
#include <SDL.h> |
18 |
#include <SDL_thread.h> |
19 |
#include <SDL_mutex.h> |
20 |
// #include <SDL_ttf.h>
|
21 |
// #include <SDL_image.h>
|
22 |
#include <SDL_video.h> |
23 |
#include <assert.h> |
24 |
#include <time.h> |
25 |
|
26 |
#include "player_stats.h" |
27 |
#include "player_defines.h" |
28 |
#include "chunker_player.h" |
29 |
#include "player_gui.h" |
30 |
#include "player_core.h" |
31 |
#include "player_stats.h" |
32 |
|
33 |
#define MAX(A,B) ((A)>(B) ? (A) : (B))
|
34 |
#define MIN(A,B) ((A)<(B) ? (A) : (B))
|
35 |
|
36 |
SDL_Overlay *YUVOverlay;  // overlay the decoded video frames are rendered into

// Ordered queue of demuxed packets plus per-queue playout statistics.
// pkt.stream_index is (re)used as a monotonically increasing frame index
// and the list is kept sorted by it (see ChunkerPlayerCore_PacketQueuePut).
typedef struct PacketQueue {
	AVPacketList *first_pkt;   // head of the sorted packet list
	AVPacket *minpts_pkt;      // cached pointer to the queued packet with the smallest pts
	AVPacketList *last_pkt;    // tail of the list
	int nb_packets;            // number of queued packets
	int size;                  // total payload bytes currently queued
	SDL_mutex *mutex;          // guards all accesses to this queue
	short int queueType;       // AUDIO or VIDEO
	int last_frame_extracted;  // frame index of the last dequeued packet //HINT THIS SHOULD BE MORE THAN 4 BYTES
	//total frames lost, as seen from the queue, since last queue init
	int total_lost_frames;
	long cumulative_bitrate;
	long cumulative_samples;

	SHistory PacketHistory;    // per-packet played/skipped/lost history (see player_stats)

	double density;
	char stats_message[255];   // last formatted statistics line for this queue
} PacketQueue;
57 |
|
58 |
// ---- global player state: this file drives a single audio/video pipeline ----

AVCodecContext *aCodecCtx;            // audio decoder context (allocated in OpenACodec)
SDL_Thread *video_thread;             // runs VideoCallback
SDL_Thread *stats_thread;             // runs CollectStatisticsThread
uint8_t *outbuf_audio;                // scratch buffer for decoded audio
// short int QueueFillingMode=1;
short int QueueStopped;               // when set, queue get/decode paths bail out early
ThreadVal VideoCallbackThreadParams;  // parameters handed to the video decode thread

int AudioQueueOffset;                 // read offset into the head audio packet (partial consumption)
PacketQueue audioq;
PacketQueue videoq;
AVPacket AudioPkt, VideoPkt;          // reusable packets filled by PacketQueueGet
int AVPlaying;                        // play/stop flag polled by the callback threads
int CurrentAudioFreq;                 // parameters of the currently opened SDL audio device
int CurrentAudioSamples;
uint8_t CurrentAudioSilence;

int GotSigInt;

long long DeltaTime;                  // wallclock-to-pts offset used for A/V sync (ms)
short int FirstTimeAudio, FirstTime;  // set until the first packet fixes DeltaTime

int dimAudioQ;                        // byte size of one SDL audio callback buffer (spec.size)
float deltaAudioQ;                    // duration of one audio buffer in ms
float deltaAudioQError;

int SaveYUV;                          // dump decoded frames to YUVFileName when set
char YUVFileName[256];
int SaveLoss;

char VideoFrameLossRateLogFilename[256];
char VideoFrameSkipRateLogFilename[256];

long int decoded_vframes;
long int LastSavedVFrame;

// forward declarations (defined later in this file)
void SaveFrame(AVFrame *pFrame, int width, int height);
int VideoCallback(void *valthread);
int CollectStatisticsThread(void *params);
void AudioCallback(void *userdata, Uint8 *stream, int len);
void PacketQueueClearStats(PacketQueue *q);

//int lastCheckedVideoFrame = -1;
long int last_video_frame_extracted = -1;  // highest video frame index dequeued so far
102 |
|
103 |
/*
 * Compute *result = *x - *y for struct timeval values.
 *
 * NOTE: *y is modified in place (carry normalization), exactly as in the
 * classic GNU libc manual example this follows.
 *
 * Returns 1 when the difference is negative (x earlier than y), 0 otherwise.
 */
int timeval_subtract(struct timeval* x, struct timeval* y, struct timeval* result)
{
	/* Normalize y so that x->tv_usec - y->tv_usec lands in [0, 1000000). */
	if (x->tv_usec < y->tv_usec) {
		int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * carry;
		y->tv_sec += carry;
	}
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec -= carry;
	}

	/* After normalization tv_usec is certainly positive. */
	result->tv_sec = x->tv_sec - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	/* 1 means the true result would be negative. */
	return x->tv_sec < y->tv_sec;
}
126 |
|
127 |
|
128 |
/*
 * Initialize a packet queue for a (re)tuned channel: zero the whole struct,
 * create its mutexes and reset statistics. Also flips the global
 * QueueFillingMode/FirstTime* flags back to their "buffering" state.
 */
void PacketQueueInit(PacketQueue *q, short int Type)
{
#ifdef DEBUG_QUEUE
	// NOTE(review): q may be uninitialized memory on the very first call,
	// so this debug print can read indeterminate values — confirm callers.
	printf("QUEUE: INIT BEGIN: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
	memset(q,0,sizeof(PacketQueue));  // must happen before any field writes below
	q->mutex = SDL_CreateMutex();
	QueueFillingMode=1;               // global: start buffering until threshold reached
	q->queueType=Type;
	q->last_frame_extracted = -1;     // no frame dequeued yet
	q->first_pkt= NULL;
	q->minpts_pkt= NULL;
	//q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	q->density= 0.0;
	FirstTime = 1;                    // force DeltaTime re-sync on next decode
	FirstTimeAudio = 1;
	//init up statistics

	q->PacketHistory.Mutex = SDL_CreateMutex();
	PacketQueueClearStats(q);

#ifdef DEBUG_QUEUE
	printf("QUEUE: INIT END: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
}
155 |
|
156 |
/*
 * Flush every queued packet and return the queue to its "buffering" state
 * without recreating the mutexes. Called e.g. when the queue overgrows
 * (see ChunkerPlayerCore_PacketQueuePut).
 */
void PacketQueueReset(PacketQueue *q)
{
	AVPacketList *tmp,*tmp1;
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET BEGIN: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_LockMutex(q->mutex);

	// free every queued node and its packet payload
	tmp = q->first_pkt;
	while(tmp) {
		tmp1 = tmp;
		tmp = tmp->next;
		av_free_packet(&(tmp1->pkt));
		av_free(tmp1);
#ifdef DEBUG_QUEUE
		printf("F ");
#endif
		q->PacketHistory.LostCount++;  // flushed packets count as lost
	}
#ifdef DEBUG_QUEUE
	printf("\n");
#endif

	QueueFillingMode=1;            // go back to buffering
	q->last_frame_extracted = -1;

	// on queue reset do not reset loss count
	// (loss count reset is done on queue init, ie channel switch)
	// NOTE(review): PacketQueueClearStats() below DOES zero LostCount,
	// which contradicts this comment and discards the increments made in
	// the flush loop above — confirm which behavior is intended.
	q->density=0.0;
	q->first_pkt= NULL;
	q->minpts_pkt= NULL;
	//q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	FirstTime = 1;
	FirstTimeAudio = 1;
	//clean up statistics
	PacketQueueClearStats(q);
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET END: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_UnlockMutex(q->mutex);
}
199 |
|
200 |
/*
 * Reset all per-queue statistics: clears the stats message, zeroes the
 * packet history ring and marks every slot unused (Status / I-frame
 * distance = -1), and resets every counter and index.
 *
 * Caller is expected to hold whatever locking the queue requires; this
 * function takes none itself.
 */
void PacketQueueClearStats(PacketQueue *q)
{
	int i;

	sprintf(q->stats_message, "%s", "\n");
	memset((void*)q->PacketHistory.History, 0, sizeof(SHistoryElement)*QUEUE_HISTORY_SIZE);
	for(i=0; i<QUEUE_HISTORY_SIZE; i++)
	{
		q->PacketHistory.History[i].Statistics.LastIFrameDistance = -1;
		q->PacketHistory.History[i].Status = -1;
	}
	// BUG FIX (cleanup): the original assigned Index twice, once paired
	// with LogIndex and once with QoEIndex; collapsed into one statement
	// with identical effect.
	q->PacketHistory.Index = q->PacketHistory.LogIndex = q->PacketHistory.QoEIndex = 0;
	q->PacketHistory.LostCount = q->PacketHistory.PlayedCount = q->PacketHistory.SkipCount = 0;
}
214 |
|
215 |
/*
 * Insert a packet into the queue, keeping the list sorted by frame index
 * (pkt->stream_index). Duplicate frame indices are skipped; packets older
 * than the last extracted frame are dropped. If the incoming index is far
 * behind what was already played, a stream restart (file loop) is assumed
 * and the channel is re-tuned.
 *
 * Returns 0 on insert/retune, 1 when the packet was dropped as too old,
 * -1 on allocation/duplication failure.
 */
int ChunkerPlayerCore_PacketQueuePut(PacketQueue *q, AVPacket *pkt)
{
	//~ printf("\tSTREAM_INDEX=%d\n", pkt->stream_index);
	short int skip = 0;
	AVPacketList *pkt1, *tmp, *prevtmp;
	int res = 0;

	// emergency brake: queue grew way past the buffering threshold
	if(q->nb_packets > queue_filling_threshold*QUEUE_MAX_GROW_FACTOR) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT i have TOO MANY packets %d Type=%s, RESETTING\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		PacketQueueReset(q);
	}

	//make a copy of the incoming packet
	if(av_dup_packet(pkt) < 0) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT in Queue cannot duplicate in packet : NPackets=%d Type=%s\n",q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		return -1;
	}
	pkt1 = av_malloc(sizeof(AVPacketList));

	if(!pkt1) {
		av_free_packet(pkt);
		return -1;
	}
	pkt1->pkt = *pkt;
	pkt1->next = NULL;

	static time_t last_auto_switch = 0;

	if(
		(pkt->stream_index < last_video_frame_extracted)
		&& (pkt->stream_index <= RESTART_FRAME_NUMBER_THRESHOLD)
		&& ((time(NULL) - last_auto_switch) > 10)
	)
	{
		printf("file streaming loop detected => re-tune channel and start grabbing statistics\n");
		last_auto_switch = time(NULL);
		SDL_LockMutex(q->mutex);
		// NOTE(review): ReTune is called while holding q->mutex — verify it
		// never locks this queue again, or this can deadlock.
		ReTune(&(Channels[SelectedChannel]));
		SDL_UnlockMutex(q->mutex);
		// BUG FIX: this path previously dropped pkt1 without releasing it,
		// leaking the list node and the duplicated payload on every loop
		// detection; free both here.
		av_free_packet(&pkt1->pkt);
		av_free(pkt1);
	}

	else
	{
		SDL_LockMutex(q->mutex);

		// INSERTION SORT ALGORITHM
		// before inserting pkt, check if pkt.stream_index is <= current_extracted_frame.
		if(pkt->stream_index > q->last_frame_extracted)
		{
			// either checking starting from the first_pkt or needed other struct like AVPacketList with next and prev....
			//if (!q->last_pkt)
			if(!q->first_pkt) {
				// empty queue: new node is both head and tail
				q->first_pkt = pkt1;
				q->last_pkt = pkt1;
			}
			else if(pkt->stream_index < q->first_pkt->pkt.stream_index) {
				//the packet that has arrived is earlier than the first we got some time ago!
				//we need to put it at the head of the queue
				pkt1->next = q->first_pkt;
				q->first_pkt = pkt1;
			}
			else {
				// walk to the first node with index >= the new one
				tmp = q->first_pkt;
				while(tmp->pkt.stream_index < pkt->stream_index) {
					prevtmp = tmp;
					tmp = tmp->next;

					if(!tmp) {
						break;
					}
				}
				if(tmp && tmp->pkt.stream_index == pkt->stream_index) {
					//we already have a frame with that index
					skip = 1;
#ifdef DEBUG_QUEUE
					printf("%s QUEUE: PUT: we already have frame with index %d, skipping\n", ((q->queueType == AUDIO) ? "AUDIO" : "VIDEO"), pkt->stream_index);
#endif
				}
				else {
					// splice the new node between prevtmp and tmp
					prevtmp->next = pkt1;
					pkt1->next = tmp;
					if(pkt1->next == NULL)
						q->last_pkt = pkt1;
				}
				//q->last_pkt->next = pkt1; // It was uncommented when not insertion sort
			}
			if(skip == 0) {
				//q->last_pkt = pkt1;
				q->nb_packets++;
				q->size += pkt1->pkt.size;
				if(q->nb_packets>=queue_filling_threshold && QueueFillingMode) // && q->queueType==AUDIO)
				{
					QueueFillingMode=0;
#ifdef DEBUG_QUEUE
					printf("QUEUE: PUT: FillingMode set to zero\n");
#endif
				}
				//set min
				if (!q->minpts_pkt || (pkt1->pkt.pts < q->minpts_pkt->pts)) {
					q->minpts_pkt = &(pkt1->pkt);
				}
			}
			else {
				// BUG FIX: the duplicate-index path left pkt1 allocated;
				// release the node and its payload.
				av_free_packet(&pkt1->pkt);
				av_free(pkt1);
			}
		}
		else {
			// packet is older than the playout point: drop it
			av_free_packet(&pkt1->pkt);
			av_free(pkt1);
#ifdef DEBUG_QUEUE
			printf("QUEUE: PUT: NOT inserting because index %d <= last extracted %d\n", pkt->stream_index, q->last_frame_extracted);
#endif
			res = 1;
		}
		SDL_UnlockMutex(q->mutex);
	}

	return res;
}
335 |
|
336 |
int OpenACodec (char *audio_codec, int sample_rate, short int audio_channels) |
337 |
{ |
338 |
AVCodec *aCodec; |
339 |
|
340 |
aCodecCtx = avcodec_alloc_context(); |
341 |
//aCodecCtx->bit_rate = 64000;
|
342 |
aCodecCtx->sample_rate = sample_rate; |
343 |
aCodecCtx->channels = audio_channels; |
344 |
aCodec = avcodec_find_decoder_by_name(audio_codec); |
345 |
if(!aCodec) {
|
346 |
printf("Codec not found!\n");
|
347 |
return -1; |
348 |
} |
349 |
if(avcodec_open(aCodecCtx, aCodec)<0) { |
350 |
fprintf(stderr, "could not open codec\n");
|
351 |
return -1; // Could not open codec |
352 |
} |
353 |
printf("using audio Codecid: %d ",aCodecCtx->codec_id);
|
354 |
printf("samplerate: %d ",aCodecCtx->sample_rate);
|
355 |
printf("channels: %d\n",aCodecCtx->channels);
|
356 |
|
357 |
return 1; |
358 |
} |
359 |
|
360 |
/*
 * Open (or re-open) the SDL audio device to match the decoder's sample
 * rate and channel count, and publish the resulting buffer geometry in
 * the CurrentAudio* / dimAudioQ / deltaAudioQ globals.
 *
 * If the device is already open with the same freq/channels, this is a
 * no-op. Returns 1 on success, -1 on failure.
 *
 * BUG FIX: the original heap-allocated a fresh SDL_AudioSpec on every
 * call and another one for the remembered spec on every re-open, freeing
 * neither (two leaks). Both now live in automatic/static storage.
 */
int OpenAudio(AVCodecContext *aCodecCtx)
{
	SDL_AudioSpec wanted_spec;
	static SDL_AudioSpec wanted_spec_old;
	static int have_old_spec = 0;

	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = AudioCallback;
	wanted_spec.userdata = aCodecCtx;

#ifdef DEBUG_AUDIO
	printf("wanted freq:%d\n",wanted_spec.freq);
	printf("wanted format:%d\n",wanted_spec.format);
	printf("wanted channels:%d\n",wanted_spec.channels);
	printf("wanted silence:%d\n",wanted_spec.silence);
	printf("wanted samples:%d\n",wanted_spec.samples);
#endif

	//do not reinit audio if the wanted specification is the same as before
	if (have_old_spec &&
	    (wanted_spec.freq == wanted_spec_old.freq) &&
	    (wanted_spec.channels == wanted_spec_old.channels)) {
		return 1;
	}

	if (have_old_spec) {
		SDL_CloseAudio();
	}

	// remember the requested spec for the next comparison
	wanted_spec_old = wanted_spec;
	have_old_spec = 1;

	if (SDL_OpenAudio(&wanted_spec,NULL)<0) {
		fprintf(stderr,"SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	// SDL fills in size/silence on open; publish the device geometry
	CurrentAudioFreq = wanted_spec.freq;
	CurrentAudioSamples = wanted_spec.samples;
	dimAudioQ = wanted_spec.size;
	deltaAudioQ = (float)((float)wanted_spec.samples)*1000/wanted_spec.freq; //in ms
	CurrentAudioSilence = wanted_spec.silence;

#ifdef DEBUG_AUDIO
	printf("freq:%d\n",wanted_spec.freq);
	printf("format:%d\n",wanted_spec.format);
	printf("channels:%d\n",wanted_spec.channels);
	printf("silence:%d\n",wanted_spec.silence);
	printf("samples:%d\n",wanted_spec.samples);
	printf("size:%d\n",wanted_spec.size);
	printf("deltaAudioQ: %f\n",deltaAudioQ);
#endif

	return 1;
}
424 |
|
425 |
int ChunkerPlayerCore_InitAudioCodecs(char *audio_codec, int sample_rate, short int audio_channels) |
426 |
{ |
427 |
// some initializations
|
428 |
QueueStopped = 0;
|
429 |
AudioQueueOffset=0;
|
430 |
AVPlaying = 0;
|
431 |
GotSigInt = 0;
|
432 |
FirstTimeAudio=1;
|
433 |
FirstTime = 1;
|
434 |
deltaAudioQError=0;
|
435 |
|
436 |
|
437 |
if (OpenACodec(audio_codec, sample_rate, audio_channels) < 0) { |
438 |
return -1; |
439 |
} |
440 |
|
441 |
if (OpenAudio(aCodecCtx) < 1) { |
442 |
return -1; |
443 |
} |
444 |
|
445 |
outbuf_audio = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
446 |
|
447 |
//initialize the audio queue
|
448 |
PacketQueueInit(&audioq, AUDIO); |
449 |
|
450 |
// Init audio buffers
|
451 |
av_init_packet(&AudioPkt); |
452 |
//printf("AVCODEC_MAX_AUDIO_FRAME_SIZE=%d\n", AVCODEC_MAX_AUDIO_FRAME_SIZE);
|
453 |
AudioPkt.data=(uint8_t *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
454 |
if(!AudioPkt.data) return -1; |
455 |
|
456 |
return 0; |
457 |
} |
458 |
|
459 |
int ChunkerPlayerCore_InitVideoCodecs(char *v_codec, int width, int height) |
460 |
{ |
461 |
|
462 |
memset(&VideoCallbackThreadParams, 0, sizeof(ThreadVal)); |
463 |
|
464 |
VideoCallbackThreadParams.width = width; |
465 |
VideoCallbackThreadParams.height = height; |
466 |
VideoCallbackThreadParams.video_codec = strdup(v_codec); |
467 |
|
468 |
//initialize the video queue
|
469 |
PacketQueueInit(&videoq, VIDEO); |
470 |
|
471 |
// Init video buffers
|
472 |
av_init_packet(&VideoPkt); |
473 |
|
474 |
VideoPkt.data=(uint8_t *)malloc(width*height*3/2); |
475 |
if(!VideoPkt.data) return -1; |
476 |
|
477 |
return 0; |
478 |
} |
479 |
|
480 |
int ChunkerPlayerCore_InitCodecs(char *v_codec, int width, int height, char *audio_codec, int sample_rate, short int audio_channels) |
481 |
{ |
482 |
char audio_stats[255], video_stats[255]; |
483 |
|
484 |
// Register all formats and codecs
|
485 |
avcodec_init(); |
486 |
av_register_all(); |
487 |
|
488 |
if (ChunkerPlayerCore_InitAudioCodecs(audio_codec, sample_rate, audio_channels) < 0) { |
489 |
return -1; |
490 |
} |
491 |
|
492 |
if (ChunkerPlayerCore_InitVideoCodecs(v_codec, width, height) < 0) { |
493 |
return -1; |
494 |
} |
495 |
|
496 |
sprintf(audio_stats, "waiting for incoming audio packets...");
|
497 |
sprintf(video_stats, "waiting for incoming video packets...");
|
498 |
ChunkerPlayerGUI_SetStatsText(audio_stats, video_stats,qoe_led ? LED_GREEN : LED_NONE); |
499 |
} |
500 |
|
501 |
/*
 * Decode an audio packet *in place* while it sits in the queue: the
 * encoded payload in pkt->data is replaced with freshly allocated PCM
 * samples, and q->size is adjusted for the size change. On return *size
 * holds the original (compressed) packet size.
 *
 * pkt->convergence_duration is (ab)used as a "decoded" marker: it is set
 * to -1 here whether or not decoding succeeds, so the packet is never
 * retried (see SeekAndDecodePacketStartingFrom).
 *
 * Returns 1 when the packet now holds decoded samples, 0 on any failure.
 */
int DecodeEnqueuedAudio(AVPacket *pkt, PacketQueue *q, int* size)
{
	uint16_t *audio_bufQ = NULL;
	int16_t *dataQ = NULL;
	int data_sizeQ = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int lenQ;
	int ret = 0;

	//set the flag to decoded anyway
	pkt->convergence_duration = -1;

	// temporary decode buffer; freed before returning
	audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if(audio_bufQ) {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: about to decode packet %d, size %d, data %d\n", pkt->stream_index, pkt->size, pkt->data);
#endif
		//decode the packet data
		lenQ = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_bufQ, &data_sizeQ, pkt);
		if(lenQ > 0) {
			dataQ = (int16_t *)av_malloc(data_sizeQ); //this will be free later at the time of playback
			if(dataQ) {
				memcpy(dataQ, audio_bufQ, data_sizeQ);
				if(pkt->data != NULL)
				{
					//discard the old encoded bytes
					av_free(pkt->data);
				}
				//subtract them from queue size
				q->size -= pkt->size;
				*size = pkt->size;
				// swap the payload: packet now carries decoded PCM
				pkt->data = (uint8_t *)dataQ;
				pkt->size = data_sizeQ;
				//add new size to queue size
				q->size += pkt->size;
				ret = 1;
			}
			else {
#ifdef DEBUG_AUDIO_BUFFER
				printf("AUDIO_BUFFER: cannot alloc space for decoded packet %d\n", pkt->stream_index);
#endif
			}
		}
		else {
#ifdef DEBUG_AUDIO_BUFFER
			printf("AUDIO_BUFFER: cannot decode packet %d\n", pkt->stream_index);
#endif
		}
		av_free(audio_bufQ);
	}
	else {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: cannot alloc decode buffer for packet %d\n", pkt->stream_index);
#endif
	}
	return ret; // 1 = decoded in place, 0 = failure (packet left encoded)
}
557 |
|
558 |
/**
|
559 |
* removes a packet from the list and returns the next
|
560 |
* */
|
561 |
AVPacketList *RemoveFromQueue(PacketQueue *q, AVPacketList *p) |
562 |
{ |
563 |
AVPacketList *p1; |
564 |
|
565 |
if (q->first_pkt == p) {
|
566 |
q->first_pkt = p->next; |
567 |
} |
568 |
if (&(p->pkt) == q->minpts_pkt) {
|
569 |
q->minpts_pkt = NULL;
|
570 |
} |
571 |
|
572 |
AVPacketList *retpk = p->next; |
573 |
q->nb_packets--; |
574 |
//adjust size here and not in the various cases of the dequeue
|
575 |
q->size -= p->pkt.size; |
576 |
if(&p->pkt)
|
577 |
{ |
578 |
av_free_packet(&p->pkt); |
579 |
} |
580 |
if(p) {
|
581 |
av_free(p); |
582 |
} |
583 |
|
584 |
//updating min info
|
585 |
for (p1 = q->first_pkt; p1; p1 = p1->next) {
|
586 |
if (!q->minpts_pkt || p1->pkt.pts < q->minpts_pkt->pts) {
|
587 |
q->minpts_pkt = &(p1->pkt); |
588 |
} |
589 |
} |
590 |
|
591 |
return retpk;
|
592 |
} |
593 |
|
594 |
/*
 * Starting at node p, return the first queued audio packet that holds (or
 * can be turned into) decoded samples. convergence_duration == 0 marks a
 * packet whose payload is still encoded; such packets are decoded in
 * place, and dropped from the queue when decoding fails.
 *
 * Returns the first usable node, or NULL when the list is exhausted.
 */
AVPacketList *SeekAndDecodePacketStartingFrom(AVPacketList *p, PacketQueue *q, int* size)
{
	while (p != NULL) {
		if (p->pkt.convergence_duration != 0) {
			// already decoded earlier: hand it back as-is
			return p;
		}
		if (DecodeEnqueuedAudio(&(p->pkt), q, size)) {
			// decoded successfully in place
			return p;
		}
		// undecodable packet: drop it and continue with its successor
		p = RemoveFromQueue(q, p);
	}
	return NULL;
}
612 |
|
613 |
/*
 * Dequeue data into pkt. Two modes:
 *  - av==1 (audio): fill pkt->data with exactly dimAudioQ bytes of decoded
 *    PCM, concatenating as many queued packets as needed; a partially
 *    consumed head packet is tracked via the global AudioQueueOffset and
 *    its pts/dts are advanced by the consumed duration.
 *  - av!=1 (video): copy the head packet out wholesale.
 *
 * On return *size holds the compressed size reported by the decode step
 * (audio mode). pkt->data must be a caller-owned buffer large enough for
 * the copy. Returns 1 on success, -1 when buffering/stopped or (video)
 * nothing to extract.
 */
int PacketQueueGet(PacketQueue *q, AVPacket *pkt, short int av, int* size)
{
	//AVPacket tmp;
	AVPacketList *pkt1 = NULL;
	int ret=-1;
	int SizeToCopy=0;
	int reqsize;

	SDL_LockMutex(q->mutex);

#ifdef DEBUG_QUEUE
	printf("QUEUE: Get NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif

	// while buffering (audio) or paused, hand out nothing
	if((q->queueType==AUDIO && QueueFillingMode) || QueueStopped)
	{
		SDL_UnlockMutex(q->mutex);
		return -1;
	}

	if(av==1) { //somebody requested an audio packet, q is the audio queue
		reqsize = dimAudioQ; //TODO pass this as parameter, not garanteed by SDL to be exactly dimAudioQ
		pkt->size = 0;
		pkt->dts = 0;
		pkt->pts = 0;
		//try to dequeue the first packet of the audio queue
		pkt1 = q->first_pkt;
		while (pkt->size < reqsize && pkt1 && SeekAndDecodePacketStartingFrom(pkt1, q, size)) {
			AVPacketList *next = pkt1->next; //save it here since we could delete pkt1 later
			// output packet inherits timing from the first contributing packet
			if (!pkt->dts) pkt->dts = pkt1->pkt.dts;
			if (!pkt->pts) pkt->pts = pkt1->pkt.pts;
			pkt->stream_index = pkt1->pkt.stream_index;
			pkt->flags = 1;
			pkt->pos = -1;
			pkt->convergence_duration = -1;
			if (pkt1->pkt.size - AudioQueueOffset <= reqsize - pkt->size) { //we need the whole packet
				SizeToCopy = pkt1->pkt.size - AudioQueueOffset; //packet might be partial
				memcpy(pkt->data + pkt->size, pkt1->pkt.data + AudioQueueOffset, SizeToCopy);
				pkt->size += SizeToCopy;
				AudioQueueOffset = 0;
				RemoveFromQueue(q, pkt1);
			} else {
				SizeToCopy = reqsize - pkt->size; //partial packet remains
				memcpy(pkt->data + pkt->size, pkt1->pkt.data + AudioQueueOffset, SizeToCopy);
				pkt->size += SizeToCopy;
				AudioQueueOffset += SizeToCopy;
				// advance the head packet's timestamps by the consumed
				// duration: bytes / (bytes-per-sample-block) / (samples/ms)
				pkt1->pkt.dts += SizeToCopy/(dimAudioQ/CurrentAudioSamples)/(CurrentAudioFreq/1000);
				pkt1->pkt.pts += SizeToCopy/(dimAudioQ/CurrentAudioSamples)/(CurrentAudioFreq/1000);
			}

#ifdef DEBUG_AUDIO_BUFFER
			printf("2: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif

			//update index of last frame extracted
			//ChunkerPlayerStats_UpdateAudioLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);
			q->last_frame_extracted = pkt->stream_index;

			pkt1 = next;
		}
		ret = 1; //TODO: check some conditions
	} else { //somebody requested a video packet, q is the video queue
		pkt1 = q->first_pkt;
		if(pkt1) {
#ifdef DEBUG_QUEUE_DEEP
			printf("	AV not 1\n");
#endif
			// field-by-field copy; the payload itself is memcpy'd below
			pkt->size = pkt1->pkt.size;
			pkt->dts = pkt1->pkt.dts;
			pkt->pts = pkt1->pkt.pts;
			pkt->stream_index = pkt1->pkt.stream_index;
			pkt->flags = pkt1->pkt.flags;
			pkt->pos = pkt1->pkt.pos;
			pkt->convergence_duration = pkt1->pkt.convergence_duration;
			//*pkt = pkt1->pkt;

			if((pkt->data != NULL) && (pkt1->pkt.data != NULL))
				memcpy(pkt->data, pkt1->pkt.data, pkt1->pkt.size);

			//HINT SEE BEFORE q->size -= pkt1->pkt.size;
			RemoveFromQueue(q, pkt1);

			ret = 1;

			ChunkerPlayerStats_UpdateVideoLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);

			//update index of last frame extracted
			q->last_frame_extracted = pkt->stream_index;
			last_video_frame_extracted = q->last_frame_extracted;
		}
#ifdef DEBUG_QUEUE
		else {
			printf("	VIDEO pk1 NULL!!!!\n");
		}
#endif
	}

	// audio queue drained: fall back to buffering mode
	if(q->nb_packets==0 && q->queueType==AUDIO) {
		QueueFillingMode=1;
#ifdef DEBUG_QUEUE
		printf("QUEUE: Get FillingMode ON\n");
#endif
	}
#ifdef DEBUG_QUEUE
	printf("QUEUE: Get Last %s Frame Extracted = %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif

	SDL_UnlockMutex(q->mutex);
	return ret;
}
723 |
|
724 |
/*
 * Produce one SDL-sized buffer of decoded audio into audio_buf, applying
 * pts-based synchronization against the wallclock (SDL_GetTicks):
 * packets too far in the past are skipped, packets inside the playout
 * window are copied out, anything further in the future is left queued.
 *
 * The first usable packet fixes the global DeltaTime (wallclock - pts)
 * used by both audio and video sync. buf_size is currently unused.
 *
 * Returns the number of bytes written to audio_buf (0 when nothing was
 * due), or -1 when buffering/stopped or the queue ran dry mid-skip.
 */
int AudioDecodeFrame(uint8_t *audio_buf, int buf_size) {
	//struct timeval now;
	int audio_pkt_size = 0;
	int compressed_size = 0;
	long long Now;
	short int DecodeAudio=0, SkipAudio=0;
	//int len1, data_size;

	//gettimeofday(&now,NULL);
	//Now = (now.tv_sec)*1000+now.tv_usec/1000;
	Now=(long long)SDL_GetTicks();

	if(QueueFillingMode || QueueStopped)
	{
		//SDL_LockMutex(timing_mutex);
		// re-arm the DeltaTime estimation for when playback resumes
		FirstTimeAudio=1;
		FirstTime = 1;
		//SDL_UnlockMutex(timing_mutex);
		return -1;
	}

	// first packet after (re)start: anchor the clock offset
	if((FirstTime==1 || FirstTimeAudio==1) && audioq.size>0) {
		if(audioq.first_pkt->pkt.pts>0)
		{
			//SDL_LockMutex(timing_mutex);
			DeltaTime=Now-(long long)(audioq.first_pkt->pkt.pts);
			FirstTimeAudio = 0;
			FirstTime = 0;
			//SDL_UnlockMutex(timing_mutex);
#ifdef DEBUG_AUDIO
			printf("AUDIO: audio_decode_frame - DeltaTimeAudio=%lld\n",DeltaTime);
#endif
		}
	}

#ifdef DEBUG_AUDIO
	if(audioq.first_pkt)
	{
		printf("AUDIO: audio_decode_frame - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)audioq.first_pkt->pkt.pts,(long long)audioq.first_pkt->pkt.pts+DeltaTime);
		printf("AUDIO: QueueLen=%d ",(int)audioq.nb_packets);
		printf("AUDIO: QueueSize=%d\n",(int)audioq.size);
	}
	else
		printf("AUDIO: audio_decode_frame - Empty queue\n");
#endif

	// classify the head packet: behind schedule -> skip,
	// inside [deltaAudioQ, 4*deltaAudioQ) of the future -> decode
	if(audioq.nb_packets>0)
	{
		if((double)audioq.first_pkt->pkt.pts+DeltaTime<Now+deltaAudioQ) //too late ... TODO: figure out the right number
		{
			SkipAudio = 1;
			DecodeAudio = 0;
		}
		else if((double)audioq.first_pkt->pkt.pts+DeltaTime>=Now+deltaAudioQ && //TODO: figure out the right number
			(double)audioq.first_pkt->pkt.pts+DeltaTime<=Now+deltaAudioQ+3*deltaAudioQ) { //TODO: how much in future? On some systems, SDL asks for more buffers in a raw
			SkipAudio = 0;
			DecodeAudio = 1;
		}
	}

	// drain late packets one buffer at a time, re-evaluating after each
	while(SkipAudio==1 && audioq.size>0)
	{
		SkipAudio = 0;
#ifdef DEBUG_AUDIO
		printf("AUDIO: skipaudio: queue size=%d\n",audioq.size);
#endif
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
		if(audioq.first_pkt)
		{
			ChunkerPlayerStats_UpdateAudioSkipHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);

			if((double)audioq.first_pkt->pkt.pts+DeltaTime<Now+deltaAudioQ) //TODO: figure out the right number
			{
				SkipAudio = 1;
				DecodeAudio = 0;
			}
			else if((double)audioq.first_pkt->pkt.pts+DeltaTime>=Now+deltaAudioQ && //TODO: figure out the right number
				(double)audioq.first_pkt->pkt.pts+DeltaTime<=Now+deltaAudioQ+3*deltaAudioQ) { //TODO: how much in future?
				SkipAudio = 0;
				DecodeAudio = 1;
			}
		}
	}
	if(DecodeAudio==1) {
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
#ifdef DEBUG_SYNC
		fprintf(stderr, "AUDIO delay =%lld ms\n",(long long)AudioPkt.pts+DeltaTime-Now);
#endif
		// NOTE(review): assumes audio_buf can hold AudioPkt.size bytes;
		// buf_size is never checked — confirm callers always pass dimAudioQ.
		memcpy(audio_buf,AudioPkt.data,AudioPkt.size);
		audio_pkt_size = AudioPkt.size;
#ifdef DEBUG_AUDIO
		printf("AUDIO: Decode audio\n");
#endif

		ChunkerPlayerStats_UpdateAudioPlayedHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);
	}

	return audio_pkt_size;
}
827 |
|
828 |
// Render a Frame to a YUV Overlay. Note that the Overlay is already bound to an SDL Surface
|
829 |
// Note that width, height would not be needed in new ffmpeg versions where this info is contained in AVFrame
|
830 |
// see: [FFmpeg-devel] [PATCH] lavc: add width and height fields to AVFrame
|
831 |
/*
 * Scale/copy a decoded YUV420P frame into the given SDL overlay.
 * The overlay is locked for the duration of the conversion; the sws
 * context is cached across calls and also handles resizing when the
 * overlay dimensions differ from the frame's.
 *
 * Returns 0 on success, -1 when the overlay cannot be locked.
 * Exits the process when the scaler context cannot be created.
 */
int RenderFrame2Overlay(AVFrame *pFrame, int frame_width, int frame_height, SDL_Overlay *YUVOverlay)
{
	AVPicture pict;
	static struct SwsContext *img_convert_ctx = NULL; //if the function is used for more streams, this could be made part of some context passed as a parameter (to optimize performance)

	if(SDL_LockYUVOverlay(YUVOverlay) < 0) {
		return -1;
	}

	// NOTE(review): planes 1 and 2 are swapped between the AVPicture and
	// the overlay — presumably the overlay is YV12 (V plane stored before
	// U) while the source is YUV420P; confirm where the overlay is created.
	pict.data[0] = YUVOverlay->pixels[0];
	pict.data[1] = YUVOverlay->pixels[2];
	pict.data[2] = YUVOverlay->pixels[1];

	pict.linesize[0] = YUVOverlay->pitches[0];
	pict.linesize[1] = YUVOverlay->pitches[2];
	pict.linesize[2] = YUVOverlay->pitches[1];

	img_convert_ctx = sws_getCachedContext(img_convert_ctx, frame_width, frame_height, PIX_FMT_YUV420P, YUVOverlay->w, YUVOverlay->h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	if(img_convert_ctx == NULL) {
		fprintf(stderr, "Cannot initialize the conversion context!\n");
		// NOTE(review): exit(1) here also leaves the overlay locked;
		// consider unlocking and returning -1 instead.
		exit(1);
	}

	// let's draw the data (*yuv[3]) on a SDL screen (*screen)
	sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, frame_height, pict.data, pict.linesize);
	SDL_UnlockYUVOverlay(YUVOverlay);

	return 0;
}
860 |
|
861 |
// Render a YUV Overlay to the specified Rect of the Surface. Note that the Overlay is already bound to an SDL Surface.
|
862 |
/*
 * Blit an already-filled YUV overlay onto the given rectangle of the main
 * SDL surface, taking the surface lock when SDL requires it.
 *
 * Returns 0 on success, -1 when the surface cannot be locked.
 */
int RenderOverlay2Rect(SDL_Overlay *YUVOverlay, SDL_Rect *Rect)
{
	int must_lock = SDL_MUSTLOCK(MainScreen);

	// Acquire the surface lock when SDL demands it for direct access.
	if (must_lock && SDL_LockSurface(MainScreen) < 0) {
		return -1;
	}

	// Display the overlay; OverlayMutex serializes overlay operations.
	SDL_LockMutex(OverlayMutex);
	SDL_DisplayYUVOverlay(YUVOverlay, Rect);
	SDL_UnlockMutex(OverlayMutex);

	if (must_lock) {
		SDL_UnlockSurface(MainScreen);
	}

	return 0;
}
884 |
|
885 |
|
886 |
int VideoCallback(void *valthread) |
887 |
{ |
888 |
//AVPacket pktvideo;
|
889 |
AVCodecContext *pCodecCtx; |
890 |
AVCodec *pCodec; |
891 |
AVFrame *pFrame; |
892 |
int frameFinished;
|
893 |
long long Now; |
894 |
long long Last = 0; |
895 |
short int SkipVideo, DecodeVideo; |
896 |
uint64_t last_pts = 0;
|
897 |
long long decode_delay = 0; |
898 |
int queue_size_checked = 0; |
899 |
|
900 |
#ifdef SAVE_YUV
|
901 |
static AVFrame* lastSavedFrameBuffer = NULL; |
902 |
|
903 |
if(!lastSavedFrameBuffer)
|
904 |
lastSavedFrameBuffer = (AVFrame*) malloc(sizeof(AVFrame));
|
905 |
#endif
|
906 |
|
907 |
//double frame_rate = 0.0,time_between_frames=0.0;
|
908 |
//struct timeval now;
|
909 |
|
910 |
//int wait_for_sync = 1;
|
911 |
ThreadVal *tval; |
912 |
tval = (ThreadVal *)valthread; |
913 |
|
914 |
//frame_rate = tval->framerate;
|
915 |
//time_between_frames = 1.e6 / frame_rate;
|
916 |
//gettimeofday(&time_now,0);
|
917 |
|
918 |
//frecon = fopen("recondechunk.mpg","wb");
|
919 |
|
920 |
//setup video decoder
|
921 |
pCodec = avcodec_find_decoder_by_name(tval->video_codec); |
922 |
if (pCodec) {
|
923 |
fprintf(stderr, "INIT: Setting VIDEO codecID to: %d\n",pCodec->id);
|
924 |
} else {
|
925 |
fprintf(stderr, "INIT: Unknown VIDEO codec: %s!\n", tval->video_codec);
|
926 |
return -1; // Codec not found |
927 |
} |
928 |
|
929 |
pCodecCtx=avcodec_alloc_context(); |
930 |
pCodecCtx->codec_type = CODEC_TYPE_VIDEO; |
931 |
//pCodecCtx->debug = FF_DEBUG_DCT_COEFF;
|
932 |
pCodecCtx->codec_id = pCodec->id; |
933 |
|
934 |
//pCodecCtx->bit_rate = 400000;
|
935 |
// resolution must be a multiple of two
|
936 |
pCodecCtx->width = tval->width;//176;//352;
|
937 |
pCodecCtx->height = tval->height;//144;//288;
|
938 |
|
939 |
// frames per second
|
940 |
//pCodecCtx->time_base = (AVRational){1,25};
|
941 |
//pCodecCtx->gop_size = 10; // emit one intra frame every ten frames
|
942 |
//pCodecCtx->max_b_frames=1;
|
943 |
pCodecCtx->pix_fmt = PIX_FMT_YUV420P; |
944 |
pCodec=avcodec_find_decoder(pCodecCtx->codec_id); |
945 |
|
946 |
if(pCodec==NULL) { |
947 |
fprintf(stderr, "Unsupported codec!\n");
|
948 |
return -1; // Codec not found |
949 |
} |
950 |
if(avcodec_open(pCodecCtx, pCodec) < 0) { |
951 |
fprintf(stderr, "could not open codec\n");
|
952 |
return -1; // Could not open codec |
953 |
} |
954 |
pFrame=avcodec_alloc_frame(); |
955 |
if(pFrame==NULL) { |
956 |
printf("Memory error!!!\n");
|
957 |
return -1; |
958 |
} |
959 |
|
960 |
#ifdef DEBUG_VIDEO
|
961 |
printf("VIDEO: video_callback entering main cycle\n");
|
962 |
#endif
|
963 |
|
964 |
while(AVPlaying && !quit) {
|
965 |
|
966 |
if(QueueFillingMode || QueueStopped)
|
967 |
{ |
968 |
//SDL_LockMutex(timing_mutex);
|
969 |
FirstTime = 1;
|
970 |
//SDL_UnlockMutex(timing_mutex);
|
971 |
usleep(5000);
|
972 |
continue;
|
973 |
} |
974 |
|
975 |
DecodeVideo = 0;
|
976 |
SkipVideo = 0;
|
977 |
Now=(long long)SDL_GetTicks(); |
978 |
if(FirstTime==1 && videoq.size>0) { |
979 |
if(videoq.first_pkt->pkt.pts>0) |
980 |
{ |
981 |
//SDL_LockMutex(timing_mutex);
|
982 |
DeltaTime=Now-(long long)videoq.first_pkt->pkt.pts; |
983 |
FirstTime = 0;
|
984 |
FirstTimeAudio = 0;
|
985 |
//SDL_UnlockMutex(timing_mutex);
|
986 |
} |
987 |
#ifdef DEBUG_VIDEO
|
988 |
printf("VIDEO: VideoCallback - DeltaTimeAudio=%lld\n",DeltaTime);
|
989 |
#endif
|
990 |
} |
991 |
|
992 |
#ifdef DEBUG_VIDEO
|
993 |
if(videoq.first_pkt)
|
994 |
{ |
995 |
printf("VIDEO: VideoCallback - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)videoq.first_pkt->pkt.pts,(long long)videoq.first_pkt->pkt.pts+DeltaTime); |
996 |
printf("VIDEO: Index=%d ", (int)videoq.first_pkt->pkt.stream_index); |
997 |
printf("VIDEO: QueueLen=%d ", (int)videoq.nb_packets); |
998 |
printf("VIDEO: QueueSize=%d\n", (int)videoq.size); |
999 |
} |
1000 |
else
|
1001 |
printf("VIDEO: VideoCallback - Empty queue\n");
|
1002 |
#endif
|
1003 |
|
1004 |
#ifdef DEBUG_VIDEO
|
1005 |
printf("VIDEO: skipvideo:%d decodevideo:%d\n",SkipVideo,DecodeVideo);
|
1006 |
#endif
|
1007 |
// ChunkerPlayerStats_UpdateVideoSkipHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame);
|
1008 |
|
1009 |
if(videoq.nb_packets>0) { |
1010 |
if (!queue_size_checked && videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts < decode_delay) { //queue too short |
1011 |
#ifdef DEBUG_SYNC
|
1012 |
fprintf(stderr, "VIDEO queue too short,diff(%lld) < decode_delay(%lld), increasing delta from \n",videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts, decode_delay, DeltaTime);
|
1013 |
#endif
|
1014 |
DeltaTime += decode_delay - (videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts); |
1015 |
queue_size_checked = 1; //make sure we do not increase the delay several times bacause of the same frame |
1016 |
} |
1017 |
if (videoq.first_pkt->pkt.pts + DeltaTime - Now < decode_delay) { //time to decode, should be based on DTS |
1018 |
if (PacketQueueGet(&videoq,&VideoPkt,0, NULL) > 0) { |
1019 |
queue_size_checked = 0;
|
1020 |
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt); |
1021 |
#ifdef DEBUG_SYNC
|
1022 |
fprintf(stderr, "VIDEO delta =%lld ms; dt=%lld \n",(long long) pFrame->pkt_pts - last_pts, Now - Last); |
1023 |
#endif
|
1024 |
last_pts = pFrame->pkt_pts; |
1025 |
if (pFrame->pkt_pts) decode_delay = MAX(decode_delay, VideoPkt.pts - pFrame->pkt_pts); //TODO: base this on dts |
1026 |
decode_delay = MIN(decode_delay, 40 * 5); //TODO, this workaround would not be needed if decode_delay would be based on DTS |
1027 |
#ifdef DEBUG_SYNC
|
1028 |
fprintf(stderr, "VIDEO t=%lld ms ptsin=%lld ptsout=%lld \n",Now, (long long)VideoPkt.pts+DeltaTime, pFrame->pkt_pts+DeltaTime); |
1029 |
fprintf(stderr, "VIDEO delay =%lld ms ; %lld ms \n",(long long)VideoPkt.pts+DeltaTime-Now, pFrame->pkt_pts+DeltaTime-Now); |
1030 |
#endif
|
1031 |
|
1032 |
if(frameFinished)
|
1033 |
{ // it must be true all the time else error
|
1034 |
|
1035 |
long long target_pts = pFrame->pkt_pts + DeltaTime; |
1036 |
long long earlier = target_pts - Now; |
1037 |
|
1038 |
#ifdef DEBUG_VIDEO
|
1039 |
printf("VIDEO: FrameFinished\n");
|
1040 |
#endif
|
1041 |
decoded_vframes++; |
1042 |
|
1043 |
|
1044 |
#ifdef VIDEO_DEINTERLACE
|
1045 |
avpicture_deinterlace( |
1046 |
(AVPicture*) pFrame, |
1047 |
(const AVPicture*) pFrame,
|
1048 |
pCodecCtx->pix_fmt, |
1049 |
tval->width, tval->height); |
1050 |
#endif
|
1051 |
|
1052 |
#ifdef SAVE_YUV
|
1053 |
if(LastSavedVFrame == -1) |
1054 |
{ |
1055 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1056 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1057 |
LastSavedVFrame = VideoPkt.stream_index; |
1058 |
} |
1059 |
else if(LastSavedVFrame == (VideoPkt.stream_index-1)) |
1060 |
{ |
1061 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1062 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1063 |
LastSavedVFrame = VideoPkt.stream_index; |
1064 |
} |
1065 |
else if(LastSavedVFrame >= 0) |
1066 |
{ |
1067 |
while(LastSavedVFrame < (VideoPkt.stream_index-1)) |
1068 |
{ |
1069 |
SaveFrame(lastSavedFrameBuffer, pCodecCtx->width, pCodecCtx->height); |
1070 |
} |
1071 |
|
1072 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1073 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1074 |
LastSavedVFrame = VideoPkt.stream_index; |
1075 |
} |
1076 |
#endif
|
1077 |
ChunkerPlayerStats_UpdateVideoPlayedHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame); |
1078 |
|
1079 |
if(SilentMode)
|
1080 |
continue;
|
1081 |
|
1082 |
SDL_LockMutex(OverlayMutex); |
1083 |
if (RenderFrame2Overlay(pFrame, pCodecCtx->width, pCodecCtx->height, YUVOverlay) < 0){ |
1084 |
SDL_UnlockMutex(OverlayMutex); |
1085 |
continue;
|
1086 |
} |
1087 |
|
1088 |
//wait for the playback time
|
1089 |
#ifdef DEBUG_SYNC
|
1090 |
fprintf(stderr, "VIDEO earlier =%lld ms\n",earlier);
|
1091 |
#endif
|
1092 |
if (earlier > 0) { |
1093 |
usleep(MIN(earlier,1000) * 1000); |
1094 |
// } else if (earlier < 0) {
|
1095 |
// fprintf(stderr, "should increase delay2 : pFrame->pkt_pts=%lld, DeltaTime=%lld, Now=%lld, earlier=%lld\n", pFrame->pkt_pts, DeltaTime, Now, earlier);
|
1096 |
// DeltaTime -= earlier;
|
1097 |
} |
1098 |
|
1099 |
Last = Now; |
1100 |
|
1101 |
if (RenderOverlay2Rect(YUVOverlay, ChunkerPlayerGUI_GetMainOverlayRect()) < 0) { |
1102 |
SDL_UnlockMutex(OverlayMutex); |
1103 |
continue;
|
1104 |
} |
1105 |
SDL_UnlockMutex(OverlayMutex); |
1106 |
|
1107 |
//redisplay logo
|
1108 |
/**SDL_BlitSurface(image, NULL, MainScreen, &dest);*/
|
1109 |
/* Update the screen area just changed */
|
1110 |
/**SDL_UpdateRects(MainScreen, 1, &dest);*/
|
1111 |
} //if FrameFinished
|
1112 |
else
|
1113 |
{ |
1114 |
ChunkerPlayerStats_UpdateVideoLossHistory(&(videoq.PacketHistory), VideoPkt.stream_index+1, videoq.last_frame_extracted-1); |
1115 |
} |
1116 |
} |
1117 |
} |
1118 |
usleep(5000);
|
1119 |
} |
1120 |
usleep(5000);
|
1121 |
} |
1122 |
avcodec_close(pCodecCtx); |
1123 |
av_free(pCodecCtx); |
1124 |
av_free(pFrame); |
1125 |
//fclose(frecon);
|
1126 |
#ifdef DEBUG_VIDEO
|
1127 |
printf("VIDEO: video callback end\n");
|
1128 |
#endif
|
1129 |
|
1130 |
#ifdef SAVE_YUV
|
1131 |
if(!lastSavedFrameBuffer)
|
1132 |
free(lastSavedFrameBuffer); |
1133 |
|
1134 |
lastSavedFrameBuffer = NULL;
|
1135 |
#endif
|
1136 |
|
1137 |
return 0; |
1138 |
} |
1139 |
|
1140 |
/*
 * SDL audio callback: fill `stream` with `len` bytes for the audio device.
 * Decodes one frame via AudioDecodeFrame(); on a size mismatch (or while
 * muted, SilentMode >= 2) the device buffer is filled with silence instead.
 */
void AudioCallback(void *userdata, Uint8 *stream, int len)
{
	static uint8_t audio_buf[AVCODEC_MAX_AUDIO_FRAME_SIZE];
	int audio_size;

	// pre-fill with the device's silence value so partial decodes stay quiet
	memset(audio_buf, CurrentAudioSilence, sizeof(audio_buf));
	audio_size = AudioDecodeFrame(audio_buf, sizeof(audio_buf));

	if(SilentMode >= 2)
		return;

	if(audio_size == len)
		memcpy(stream, (uint8_t *)audio_buf, len);
	else
		memset(stream, CurrentAudioSilence, len);
}
1158 |
|
1159 |
/*
 * Append one decoded YUV420P frame to the raw dump file (YUVFileName).
 * Planes are written row by row because linesize may exceed the visible
 * width; chroma planes are half resolution in both dimensions.
 */
void SaveFrame(AVFrame *pFrame, int width, int height)
{
	FILE *out;
	int row;

	out = fopen(YUVFileName, "ab");
	if(out == NULL)
		return;

	// luma plane: full resolution
	for(row = 0; row < height; row++)
		fwrite(pFrame->data[0] + row*pFrame->linesize[0], 1, width, out);

	// chroma U plane: quarter size
	for(row = 0; row < height/2; row++)
		fwrite(pFrame->data[1] + row*pFrame->linesize[1], 1, width/2, out);

	// chroma V plane: quarter size
	for(row = 0; row < height/2; row++)
		fwrite(pFrame->data[2] + row*pFrame->linesize[2], 1, width/2, out);

	fclose(out);
}
1185 |
|
1186 |
int ChunkerPlayerCore_IsRunning()
|
1187 |
{ |
1188 |
return AVPlaying;
|
1189 |
} |
1190 |
|
1191 |
void ChunkerPlayerCore_Play()
|
1192 |
{ |
1193 |
if(AVPlaying) return; |
1194 |
AVPlaying = 1;
|
1195 |
|
1196 |
SDL_PauseAudio(0);
|
1197 |
video_thread = SDL_CreateThread(VideoCallback, &VideoCallbackThreadParams); |
1198 |
ChunkerPlayerStats_Init(&VideoCallbackThreadParams); |
1199 |
stats_thread = SDL_CreateThread(CollectStatisticsThread, NULL);
|
1200 |
|
1201 |
decoded_vframes = 0;
|
1202 |
LastSavedVFrame = -1;
|
1203 |
} |
1204 |
|
1205 |
void ChunkerPlayerCore_Stop()
|
1206 |
{ |
1207 |
if(!AVPlaying) return; |
1208 |
|
1209 |
AVPlaying = 0;
|
1210 |
|
1211 |
// Stop audio&video playback
|
1212 |
SDL_WaitThread(video_thread, NULL);
|
1213 |
SDL_WaitThread(stats_thread, NULL);
|
1214 |
SDL_PauseAudio(1);
|
1215 |
|
1216 |
if(YUVOverlay != NULL) |
1217 |
{ |
1218 |
SDL_FreeYUVOverlay(YUVOverlay); |
1219 |
YUVOverlay = NULL;
|
1220 |
} |
1221 |
|
1222 |
PacketQueueReset(&audioq); |
1223 |
PacketQueueReset(&videoq); |
1224 |
|
1225 |
avcodec_close(aCodecCtx); |
1226 |
av_free(aCodecCtx); |
1227 |
free(AudioPkt.data); |
1228 |
free(VideoPkt.data); |
1229 |
free(outbuf_audio); |
1230 |
|
1231 |
/*
|
1232 |
* Sleep two buffers' worth of audio before closing, in order
|
1233 |
* to allow the playback to finish. This isn't always enough;
|
1234 |
* perhaps SDL needs a way to explicitly wait for device drain?
|
1235 |
* Doesn't seem to be necessary -> disabled
|
1236 |
*/
|
1237 |
//int delay = 2 * 1000 * CurrentAudioSamples / CurrentAudioFreq;
|
1238 |
// printf("SDL_Delay(%d)\n", delay*10);
|
1239 |
//SDL_Delay(delay*10);
|
1240 |
} |
1241 |
|
1242 |
void ChunkerPlayerCore_Finalize()
|
1243 |
{ |
1244 |
if(YUVOverlay != NULL) |
1245 |
{ |
1246 |
SDL_FreeYUVOverlay(YUVOverlay); |
1247 |
YUVOverlay = NULL;
|
1248 |
} |
1249 |
|
1250 |
SDL_CloseAudio(); |
1251 |
} |
1252 |
|
1253 |
void ChunkerPlayerCore_Pause()
|
1254 |
{ |
1255 |
if(!AVPlaying) return; |
1256 |
|
1257 |
AVPlaying = 0;
|
1258 |
|
1259 |
// Stop audio&video playback
|
1260 |
SDL_WaitThread(video_thread, NULL);
|
1261 |
SDL_PauseAudio(1);
|
1262 |
|
1263 |
PacketQueueReset(&audioq); |
1264 |
PacketQueueReset(&videoq); |
1265 |
} |
1266 |
|
1267 |
int ChunkerPlayerCore_AudioEnded()
|
1268 |
{ |
1269 |
return (audioq.nb_packets==0 && audioq.last_frame_extracted>0); |
1270 |
} |
1271 |
|
1272 |
void ChunkerPlayerCore_ResetAVQueues()
|
1273 |
{ |
1274 |
#ifdef DEBUG_QUEUE
|
1275 |
printf("QUEUE: MAIN SHOULD RESET\n");
|
1276 |
#endif
|
1277 |
PacketQueueReset(&audioq); |
1278 |
PacketQueueReset(&videoq); |
1279 |
} |
1280 |
|
1281 |
int ChunkerPlayerCore_EnqueueBlocks(const uint8_t *block, const int block_size) |
1282 |
{ |
1283 |
#ifdef EMULATE_CHUNK_LOSS
|
1284 |
static time_t loss_cycle_start_time = 0, now = 0; |
1285 |
static int early_losses = 0; |
1286 |
static int clp_frames = 0; |
1287 |
|
1288 |
if(ScheduledChunkLosses)
|
1289 |
{ |
1290 |
static unsigned int random_threshold; |
1291 |
now=time(NULL);
|
1292 |
if(!loss_cycle_start_time)
|
1293 |
loss_cycle_start_time = now; |
1294 |
|
1295 |
if(((now-loss_cycle_start_time) >= ScheduledChunkLosses[((CurrChunkLossIndex+1)%NScheduledChunkLosses)].Time) && (NScheduledChunkLosses>1 || CurrChunkLossIndex==-1)) |
1296 |
{ |
1297 |
CurrChunkLossIndex = ((CurrChunkLossIndex+1)%NScheduledChunkLosses);
|
1298 |
if(CurrChunkLossIndex == (NScheduledChunkLosses-1)) |
1299 |
loss_cycle_start_time = now; |
1300 |
|
1301 |
if(ScheduledChunkLosses[CurrChunkLossIndex].Value == -1) |
1302 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].MinValue + (rand() % (ScheduledChunkLosses[CurrChunkLossIndex].MaxValue-ScheduledChunkLosses[CurrChunkLossIndex].MinValue)); |
1303 |
else
|
1304 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].Value; |
1305 |
|
1306 |
printf("new ScheduledChunkLoss, time: %d, value: %d\n", (int)ScheduledChunkLosses[CurrChunkLossIndex].Time, random_threshold); |
1307 |
} |
1308 |
|
1309 |
if(clp_frames > 0) |
1310 |
{ |
1311 |
clp_frames--; |
1312 |
return PLAYER_FAIL_RETURN;
|
1313 |
} |
1314 |
if((rand() % 100) < random_threshold) |
1315 |
{ |
1316 |
if(early_losses > 0) |
1317 |
early_losses--; |
1318 |
else
|
1319 |
{ |
1320 |
clp_frames=early_losses=(ScheduledChunkLosses[CurrChunkLossIndex].Burstiness-1);
|
1321 |
return PLAYER_FAIL_RETURN;
|
1322 |
} |
1323 |
} |
1324 |
} |
1325 |
#endif
|
1326 |
|
1327 |
Chunk *gchunk = NULL;
|
1328 |
int decoded_size = -1; |
1329 |
uint8_t *tempdata, *buffer; |
1330 |
int j;
|
1331 |
Frame *frame = NULL;
|
1332 |
AVPacket packet, packetaudio; |
1333 |
|
1334 |
uint16_t *audio_bufQ = NULL;
|
1335 |
|
1336 |
//the frame.h gets encoded into 5 slots of 32bits (3 ints plus 2 more for the timeval struct
|
1337 |
static int sizeFrameHeader = 5*sizeof(int32_t); |
1338 |
//the following we dont need anymore
|
1339 |
//static int ExternalChunk_header_size = 5*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 1*CHUNK_TRANSCODING_INT_SIZE*2;
|
1340 |
|
1341 |
static int chunks_out_of_order = 0; |
1342 |
static int last_chunk_id = -1; |
1343 |
|
1344 |
audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
1345 |
if(!audio_bufQ) {
|
1346 |
printf("Memory error in audio_bufQ!\n");
|
1347 |
return PLAYER_FAIL_RETURN;
|
1348 |
} |
1349 |
|
1350 |
gchunk = (Chunk *)malloc(sizeof(Chunk));
|
1351 |
if(!gchunk) {
|
1352 |
printf("Memory error in gchunk!\n");
|
1353 |
av_free(audio_bufQ); |
1354 |
return PLAYER_FAIL_RETURN;
|
1355 |
} |
1356 |
|
1357 |
decoded_size = decodeChunk(gchunk, block, block_size); |
1358 |
|
1359 |
if(last_chunk_id == -1) |
1360 |
last_chunk_id = gchunk->id; |
1361 |
|
1362 |
if(gchunk->id > (last_chunk_id+1)) { |
1363 |
chunks_out_of_order += gchunk->id - last_chunk_id - 1;
|
1364 |
} |
1365 |
last_chunk_id = gchunk->id; |
1366 |
|
1367 |
#ifdef DEBUG_CHUNKER
|
1368 |
printf("CHUNKER: enqueueBlock: id %d decoded_size %d target size %d - out_of_order %d\n", gchunk->id, decoded_size, GRAPES_ENCODED_CHUNK_HEADER_SIZE + ExternalChunk_header_size + gchunk->size, chunks_out_of_order);
|
1369 |
#endif
|
1370 |
if(decoded_size < 0) { |
1371 |
//HINT here i should differentiate between various return values of the decode
|
1372 |
//in order to free what has been allocated there
|
1373 |
printf("chunk probably corrupted!\n");
|
1374 |
av_free(audio_bufQ); |
1375 |
free(gchunk); |
1376 |
return PLAYER_FAIL_RETURN;
|
1377 |
} |
1378 |
|
1379 |
frame = (Frame *)malloc(sizeof(Frame));
|
1380 |
if(!frame) {
|
1381 |
printf("Memory error in Frame!\n");
|
1382 |
if(gchunk) {
|
1383 |
if(gchunk->attributes) {
|
1384 |
free(gchunk->attributes); |
1385 |
} |
1386 |
free(gchunk); |
1387 |
} |
1388 |
av_free(audio_bufQ); |
1389 |
return PLAYER_FAIL_RETURN;
|
1390 |
} |
1391 |
|
1392 |
tempdata = gchunk->data; //let it point to first frame of payload
|
1393 |
j=gchunk->size; |
1394 |
while(j>0 && !quit) { |
1395 |
frame->number = bit32_encoded_pull(tempdata); |
1396 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1397 |
frame->timestamp.tv_sec = bit32_encoded_pull(tempdata); |
1398 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1399 |
frame->timestamp.tv_usec = bit32_encoded_pull(tempdata); |
1400 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1401 |
frame->size = bit32_encoded_pull(tempdata); |
1402 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1403 |
frame->type = bit32_encoded_pull(tempdata); |
1404 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1405 |
|
1406 |
buffer = tempdata; // here coded frame information
|
1407 |
tempdata += frame->size; //let it point to the next frame
|
1408 |
|
1409 |
if(frame->type < 5) { // video frame |
1410 |
av_init_packet(&packet); |
1411 |
packet.data = buffer;//video_bufQ;
|
1412 |
packet.size = frame->size; |
1413 |
packet.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1414 |
packet.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1415 |
packet.stream_index = frame->number; // use of stream_index for number frame
|
1416 |
//packet.duration = frame->timestamp.tv_sec;
|
1417 |
if(packet.size > 0) { |
1418 |
int ret = ChunkerPlayerCore_PacketQueuePut(&videoq, &packet); //the _put makes a copy of the packet |
1419 |
if (ret == 1) { //TODO: check and correct return values |
1420 |
fprintf(stderr, "late chunk received, increasing delay to %lld\n", DeltaTime);
|
1421 |
DeltaTime += 5; //TODO: handle audio skip; verify this value |
1422 |
} |
1423 |
} |
1424 |
|
1425 |
#ifdef DEBUG_SOURCE
|
1426 |
printf("SOURCE: Insert video in queue pts=%lld %d %d sindex:%d\n",packet.pts,(int)frame->timestamp.tv_sec,(int)frame->timestamp.tv_usec,packet.stream_index); |
1427 |
#endif
|
1428 |
} |
1429 |
else if(frame->type == 5) { // audio frame |
1430 |
av_init_packet(&packetaudio); |
1431 |
packetaudio.data = buffer; |
1432 |
packetaudio.size = frame->size; |
1433 |
packetaudio.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1434 |
packetaudio.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1435 |
//packetaudio.duration = frame->timestamp.tv_sec;
|
1436 |
packetaudio.stream_index = frame->number; // use of stream_index for number frame
|
1437 |
packetaudio.flags = 1;
|
1438 |
packetaudio.pos = -1;
|
1439 |
|
1440 |
//instead of -1, in order to signal it is not decoded yet
|
1441 |
packetaudio.convergence_duration = 0;
|
1442 |
|
1443 |
// insert the audio frame into the queue
|
1444 |
if(packetaudio.size > 0) { |
1445 |
int ret = ChunkerPlayerCore_PacketQueuePut(&audioq, &packetaudio);//makes a copy of the packet so i can free here |
1446 |
if (ret == 1) { //TODO: check and correct return values |
1447 |
fprintf(stderr, "late chunk received, increasing delay to %lld\n", DeltaTime);
|
1448 |
DeltaTime += 5; //TODO: handle audio skip; verify this value |
1449 |
} |
1450 |
} |
1451 |
|
1452 |
#ifdef DEBUG_SOURCE
|
1453 |
printf("SOURCE: Insert audio in queue pts=%lld sindex:%d\n", packetaudio.pts, packetaudio.stream_index);
|
1454 |
#endif
|
1455 |
} |
1456 |
else {
|
1457 |
printf("SOURCE: Unknown frame type %d. Size %d\n", frame->type, frame->size);
|
1458 |
} |
1459 |
if(frame->size > 0) |
1460 |
j = j - sizeFrameHeader - frame->size; |
1461 |
else {
|
1462 |
printf("SOURCE: Corrupt frames (size %d) in chunk. Skipping it...\n", frame->size);
|
1463 |
j = -1;
|
1464 |
} |
1465 |
} |
1466 |
//chunk ingestion terminated!
|
1467 |
if(gchunk) {
|
1468 |
if(gchunk->attributes) {
|
1469 |
free(gchunk->attributes); |
1470 |
} |
1471 |
if(gchunk->data)
|
1472 |
free(gchunk->data); |
1473 |
free(gchunk); |
1474 |
} |
1475 |
if(frame)
|
1476 |
free(frame); |
1477 |
if(audio_bufQ)
|
1478 |
av_free(audio_bufQ); |
1479 |
|
1480 |
return PLAYER_OK_RETURN;
|
1481 |
} |
1482 |
|
1483 |
void ChunkerPlayerCore_SetupOverlay(int width, int height) |
1484 |
{ |
1485 |
// if(!MainScreen && !SilentMode)
|
1486 |
// {
|
1487 |
// printf("Cannot find main screen, exiting...\n");
|
1488 |
// exit(1);
|
1489 |
// }
|
1490 |
|
1491 |
if(SilentMode)
|
1492 |
return;
|
1493 |
|
1494 |
//TODO: return with error if size is too small
|
1495 |
width = MAX(width, 8);
|
1496 |
height = MAX(height, 8);
|
1497 |
|
1498 |
SDL_LockMutex(OverlayMutex); |
1499 |
if(YUVOverlay != NULL) |
1500 |
{ |
1501 |
SDL_FreeYUVOverlay(YUVOverlay); |
1502 |
YUVOverlay = NULL;
|
1503 |
} |
1504 |
|
1505 |
// create video overlay for display of video frames
|
1506 |
// printf("SDL_CreateYUVOverlay(%d, %d, SDL_YV12_OVERLAY, MainScreen)\n", width, height);
|
1507 |
YUVOverlay = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, MainScreen); |
1508 |
if ( YUVOverlay == NULL ) |
1509 |
{ |
1510 |
fprintf(stderr,"SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
|
1511 |
exit(1);
|
1512 |
} |
1513 |
|
1514 |
if ( YUVOverlay->hw_overlay )
|
1515 |
fprintf(stderr,"SDL: Using hardware overlay.\n");
|
1516 |
|
1517 |
SDL_DisplayYUVOverlay(YUVOverlay, ChunkerPlayerGUI_GetMainOverlayRect()); |
1518 |
|
1519 |
SDL_UnlockMutex(OverlayMutex); |
1520 |
} |
1521 |
|
1522 |
int CollectStatisticsThread(void *params) |
1523 |
{ |
1524 |
struct timeval last_stats_evaluation, now, last_trace, last_qoe_evaluation;
|
1525 |
gettimeofday(&last_stats_evaluation, NULL);
|
1526 |
last_trace = last_stats_evaluation; |
1527 |
last_qoe_evaluation = last_stats_evaluation; |
1528 |
|
1529 |
double video_qdensity;
|
1530 |
double audio_qdensity;
|
1531 |
char audio_stats_text[255]; |
1532 |
char video_stats_text[255]; |
1533 |
SStats audio_statistics, video_statistics; |
1534 |
double qoe = 0; |
1535 |
int sleep_time = STATS_THREAD_GRANULARITY*1000; |
1536 |
int audio_avg_bitrate = 0; |
1537 |
int video_avg_bitrate = 0; |
1538 |
|
1539 |
while(AVPlaying && !quit)
|
1540 |
{ |
1541 |
usleep(sleep_time); |
1542 |
|
1543 |
gettimeofday(&now, NULL);
|
1544 |
|
1545 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_stats_evaluation.tv_sec*1000)+(last_stats_evaluation.tv_usec/1000))) > GUI_PRINTSTATS_INTERVAL) |
1546 |
{ |
1547 |
// estimate audio queue stats
|
1548 |
int audio_stats_changed = ChunkerPlayerStats_GetStats(&(audioq.PacketHistory), &audio_statistics);
|
1549 |
|
1550 |
// estimate video queue stats
|
1551 |
int video_stats_changed = ChunkerPlayerStats_GetStats(&(videoq.PacketHistory), &video_statistics);
|
1552 |
|
1553 |
// compute avg bitrate up to now
|
1554 |
audioq.cumulative_bitrate += audio_statistics.Bitrate; |
1555 |
audioq.cumulative_samples++; |
1556 |
audio_avg_bitrate = (int)( ((double)audioq.cumulative_bitrate) / ((double)audioq.cumulative_samples) ); |
1557 |
videoq.cumulative_bitrate += video_statistics.Bitrate; |
1558 |
videoq.cumulative_samples++; |
1559 |
video_avg_bitrate = (int)( ((double)videoq.cumulative_bitrate) / ((double)videoq.cumulative_samples) ); |
1560 |
|
1561 |
#ifdef DEBUG_STATS
|
1562 |
printf("VIDEO: %d Kbit/sec; ", video_statistics.Bitrate);
|
1563 |
printf("AUDIO: %d Kbit/sec\n", audio_statistics.Bitrate);
|
1564 |
#endif
|
1565 |
|
1566 |
// QUEUE DENSITY EVALUATION
|
1567 |
if((audioq.last_pkt != NULL) && (audioq.first_pkt != NULL)) |
1568 |
if(audioq.last_pkt->pkt.stream_index >= audioq.first_pkt->pkt.stream_index)
|
1569 |
{ |
1570 |
//plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1571 |
audio_qdensity = (double)audioq.nb_packets / (double)(audioq.last_pkt->pkt.stream_index - audioq.first_pkt->pkt.stream_index + 1) * 100.0; |
1572 |
} |
1573 |
|
1574 |
if((videoq.last_pkt != NULL) && (videoq.first_pkt != NULL)) |
1575 |
if(videoq.last_pkt->pkt.stream_index >= videoq.first_pkt->pkt.stream_index)
|
1576 |
{ |
1577 |
// plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1578 |
video_qdensity = (double)videoq.nb_packets / (double)(videoq.last_pkt->pkt.stream_index - videoq.first_pkt->pkt.stream_index + 1) * 100.0; |
1579 |
} |
1580 |
|
1581 |
if(LogTraces)
|
1582 |
{ |
1583 |
ChunkerPlayerStats_PrintHistoryTrace(&(audioq.PacketHistory), AudioTraceFilename); |
1584 |
ChunkerPlayerStats_PrintHistoryTrace(&(videoq.PacketHistory), VideoTraceFilename); |
1585 |
|
1586 |
//if(SilentMode != 1 && SilentMode != 2)
|
1587 |
ChunkerPlayerStats_PrintContextFile(); |
1588 |
} |
1589 |
|
1590 |
// PRINT STATISTICS ON GUI
|
1591 |
if(!Audio_ON)
|
1592 |
sprintf(audio_stats_text, "AUDIO MUTED");
|
1593 |
else if(audio_stats_changed) |
1594 |
// sprintf(audio_stats_text, "[AUDIO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)", (int)audioq.nb_packets, (int)audio_qdensity, (int)audio_statistics.Lossrate, audioq.PacketHistory.LostCount, audio_statistics.Skiprate, audioq.PacketHistory.SkipCount);
|
1595 |
sprintf(audio_stats_text, "[AUDIO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - rate: %d kbits/sec (avg: %d)", (int)audioq.nb_packets, (int)audio_qdensity, (int)audio_statistics.Lossrate, audioq.PacketHistory.LostCount, audio_statistics.Bitrate, audio_avg_bitrate); |
1596 |
else
|
1597 |
sprintf(audio_stats_text, "waiting for incoming audio packets...");
|
1598 |
|
1599 |
if(video_stats_changed)
|
1600 |
{ |
1601 |
char est_psnr_string[255]; |
1602 |
sprintf(est_psnr_string, ".");
|
1603 |
if(qoe)
|
1604 |
{ |
1605 |
sprintf(est_psnr_string, " - Est. Mean PSNR: %.1f db", (float)qoe); |
1606 |
#ifdef PSNR_PUBLICATION
|
1607 |
// Publish measure into repository
|
1608 |
if(RepoAddress[0]!='\0') |
1609 |
{ |
1610 |
MeasurementRecord r; |
1611 |
r.originator = NetworkID; |
1612 |
r.targetA = NetworkID; |
1613 |
r.targetB = NULL;
|
1614 |
r.published_name = "PSNR_MEAN";
|
1615 |
r.value = qoe; |
1616 |
r.string_value = NULL;
|
1617 |
r.channel = Channels[SelectedChannel].Title; |
1618 |
gettimeofday(&(r.timestamp), NULL);
|
1619 |
// One update every REPO_UPDATE_INTERVALL seconds
|
1620 |
struct timeval ElapsedTime;
|
1621 |
timeval_subtract(&(r.timestamp),&LastTimeRepoPublish,&ElapsedTime); |
1622 |
if(ElapsedTime.tv_sec>=PSNR_REPO_UPDATE_INTERVALL)
|
1623 |
{ |
1624 |
LastTimeRepoPublish=r.timestamp; |
1625 |
if(repPublish(repoclient,NULL,NULL,&r)!=NULL) { |
1626 |
#ifdef DEBUG_PSNR
|
1627 |
printf("PSNR publish: %s %e %s\n",r.originator,qoe,r.channel);
|
1628 |
#endif
|
1629 |
} |
1630 |
} |
1631 |
} |
1632 |
#endif
|
1633 |
} |
1634 |
|
1635 |
// sprintf(video_stats_text, "[VIDEO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)%s", (int)videoq.nb_packets, (int)video_qdensity, video_statistics.Lossrate, videoq.PacketHistory.LostCount, video_statistics.Skiprate, videoq.PacketHistory.SkipCount, est_psnr_string);
|
1636 |
sprintf(video_stats_text, "[VIDEO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - rate: %d kbits/sec (avg: %d) %s", (int)videoq.nb_packets, (int)video_qdensity, video_statistics.Lossrate, videoq.PacketHistory.LostCount, video_statistics.Bitrate, video_avg_bitrate, est_psnr_string); |
1637 |
} |
1638 |
else
|
1639 |
sprintf(video_stats_text, "waiting for incoming video packets...");
|
1640 |
|
1641 |
if(qoe && qoe_led) {
|
1642 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,(qoe>LED_THRS_YELLOW?LED_GREEN:((qoe<=LED_THRS_YELLOW && qoe>LED_THRS_RED)?LED_YELLOW:LED_RED))); |
1643 |
} else {
|
1644 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,LED_NONE); |
1645 |
} |
1646 |
|
1647 |
last_stats_evaluation = now; |
1648 |
} |
1649 |
|
1650 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_qoe_evaluation.tv_sec*1000)+(last_qoe_evaluation.tv_usec/1000))) > EVAL_QOE_INTERVAL) |
1651 |
{ |
1652 |
// ESTIMATE QoE
|
1653 |
//ChunkerPlayerStats_GetMeanVideoQuality(&(videoq.PacketHistory), &qoe);
|
1654 |
// ESTIMATE QoE using real-time computed cumulative average bitrate
|
1655 |
// (plus a diminshing contribution of the instantaneous bitrate, until the cumulative avg stabilizes)
|
1656 |
int input_bitrate = 0; |
1657 |
// stabilize after circa 30 seconds
|
1658 |
if(videoq.cumulative_samples < 30*(1000/GUI_PRINTSTATS_INTERVAL)) |
1659 |
input_bitrate = video_statistics.Bitrate; |
1660 |
else
|
1661 |
input_bitrate = video_avg_bitrate; |
1662 |
//double a = 1 / ((double)videoq.cumulative_samples);
|
1663 |
//double b = 1-a;
|
1664 |
//double input_bitrate = a*((double)video_statistics.Bitrate) + b*((double)video_avg_bitrate);
|
1665 |
ChunkerPlayerStats_GetMeanVideoQuality(&(videoq.PacketHistory), input_bitrate, &qoe); |
1666 |
#ifdef DEBUG_STATS
|
1667 |
printf("rate %d avg %d wghtd %d cum_samp %d PSNR %f\n", video_statistics.Bitrate, video_avg_bitrate, (int)input_bitrate, videoq.cumulative_samples, (float)qoe); |
1668 |
#endif
|
1669 |
last_qoe_evaluation = now; |
1670 |
} |
1671 |
} |
1672 |
return 0; |
1673 |
} |
1674 |
|
1675 |
void ChunkerPlayerCore_ChangeDelay(int ms) |
1676 |
{ |
1677 |
DeltaTime += ms; |
1678 |
} |