chunker-player / chunker_player / player_core.c @ b07bcb88
History | View | Annotate | Download (49.6 KB)
1 |
/*
|
---|---|
2 |
* Copyright (c) 2009-2011 Carmelo Daniele, Dario Marchese, Diego Reforgiato, Giuseppe Tropea
|
3 |
* Copyright (c) 2010-2011 Csaba Kiraly
|
4 |
* developed for the Napa-Wine EU project. See www.napa-wine.eu
|
5 |
*
|
6 |
* This is free software; see lgpl-2.1.txt
|
7 |
*/
|
8 |
|
9 |
#include <libavcodec/avcodec.h> |
10 |
#include <libavformat/avformat.h> |
11 |
#include <libswscale/swscale.h> |
12 |
#include "libav-compat.h" |
13 |
|
14 |
#include <stdio.h> |
15 |
#include <unistd.h> |
16 |
#include "external_chunk_transcoding.h" |
17 |
#include "frame.h" |
18 |
#include <SDL.h> |
19 |
#include <SDL_thread.h> |
20 |
#include <SDL_mutex.h> |
21 |
// #include <SDL_ttf.h>
|
22 |
// #include <SDL_image.h>
|
23 |
#include <SDL_video.h> |
24 |
#include <assert.h> |
25 |
#include <time.h> |
26 |
|
27 |
#include "player_stats.h" |
28 |
#include "player_defines.h" |
29 |
#include "chunker_player.h" |
30 |
#include "player_gui.h" |
31 |
#include "player_core.h" |
32 |
#include "player_stats.h" |
33 |
|
34 |
#define MAX(A,B) ((A)>(B) ? (A) : (B))
|
35 |
#define MIN(A,B) ((A)<(B) ? (A) : (B))
|
36 |
|
37 |
SDL_Overlay *YUVOverlay; |
38 |
|
39 |
typedef struct PacketQueue { |
40 |
AVPacketList *first_pkt; |
41 |
AVPacket *minpts_pkt; |
42 |
AVPacketList *last_pkt; |
43 |
int nb_packets;
|
44 |
int size;
|
45 |
SDL_mutex *mutex; |
46 |
short int queueType; |
47 |
int last_frame_extracted; //HINT THIS SHOULD BE MORE THAN 4 BYTES |
48 |
//total frames lost, as seen from the queue, since last queue init
|
49 |
int total_lost_frames;
|
50 |
long cumulative_bitrate;
|
51 |
long cumulative_samples;
|
52 |
|
53 |
SHistory PacketHistory; |
54 |
|
55 |
double density;
|
56 |
char stats_message[255]; |
57 |
} PacketQueue; |
58 |
|
59 |
AVCodecContext *aCodecCtx; |
60 |
SDL_Thread *video_thread; |
61 |
SDL_Thread *stats_thread; |
62 |
uint8_t *outbuf_audio; |
63 |
// short int QueueFillingMode=1;
|
64 |
short int QueueStopped; |
65 |
ThreadVal VideoCallbackThreadParams; |
66 |
|
67 |
int AudioQueueOffset;
|
68 |
PacketQueue audioq; |
69 |
PacketQueue videoq; |
70 |
AVPacket AudioPkt, VideoPkt; |
71 |
int AVPlaying;
|
72 |
int CurrentAudioFreq;
|
73 |
int CurrentAudioSamples;
|
74 |
uint8_t CurrentAudioSilence; |
75 |
|
76 |
int GotSigInt;
|
77 |
|
78 |
long long DeltaTime; |
79 |
short int FirstTimeAudio, FirstTime; |
80 |
|
81 |
int dimAudioQ;
|
82 |
float deltaAudioQ;
|
83 |
float deltaAudioQError;
|
84 |
|
85 |
int SaveYUV;
|
86 |
char YUVFileName[256]; |
87 |
int SaveLoss;
|
88 |
|
89 |
char VideoFrameLossRateLogFilename[256]; |
90 |
char VideoFrameSkipRateLogFilename[256]; |
91 |
|
92 |
long int decoded_vframes; |
93 |
long int LastSavedVFrame; |
94 |
|
95 |
void SaveFrame(AVFrame *pFrame, int width, int height); |
96 |
int VideoCallback(void *valthread); |
97 |
int CollectStatisticsThread(void *params); |
98 |
void AudioCallback(void *userdata, Uint8 *stream, int len); |
99 |
void PacketQueueClearStats(PacketQueue *q);
|
100 |
|
101 |
//int lastCheckedVideoFrame = -1;
|
102 |
long int last_video_frame_extracted = -1; |
103 |
|
104 |
/*
 * Compute *result = *x - *y for struct timeval operands.
 * NOTE: *y is modified during normalization (same contract as the
 * classic GNU libc example this follows).
 * Returns 1 if the difference is negative, 0 otherwise.
 */
int timeval_subtract(struct timeval* x, struct timeval* y, struct timeval* result)
{
	/* Borrow from the seconds field when x has fewer microseconds. */
	if (x->tv_usec < y->tv_usec) {
		int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * borrow;
		y->tv_sec  += borrow;
	}

	/* Push excess microseconds of the difference into whole seconds. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int carry = (x->tv_usec - y->tv_usec) / 1000000;
		y->tv_usec += 1000000 * carry;
		y->tv_sec  -= carry;
	}

	/* The microsecond part of the result is now certainly positive. */
	result->tv_sec  = x->tv_sec  - y->tv_sec;
	result->tv_usec = x->tv_usec - y->tv_usec;

	/* Negative difference iff x's (adjusted) seconds are smaller. */
	return x->tv_sec < y->tv_sec;
}
127 |
|
128 |
|
129 |
/*
 * Prepares queue q for a fresh playback session of the given Type
 * (AUDIO or VIDEO): zeroes the structure, creates its locks and
 * resets the global timing/filling state.
 */
void PacketQueueInit(PacketQueue *q, short int Type)
{
#ifdef DEBUG_QUEUE
	printf("QUEUE: INIT BEGIN: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
	/* Start from an all-zero structure, then set the non-zero fields. */
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->queueType = Type;
	q->last_frame_extracted = -1;
	q->first_pkt = NULL;
	q->minpts_pkt = NULL;
	//q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	q->density = 0.0;

	/* Global playback state: refill before playing, restart timing. */
	QueueFillingMode = 1;
	FirstTime = 1;
	FirstTimeAudio = 1;

	/* Per-queue statistics get their own lock and a clean slate. */
	q->PacketHistory.Mutex = SDL_CreateMutex();
	PacketQueueClearStats(q);
#ifdef DEBUG_QUEUE
	printf("QUEUE: INIT END: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
}
156 |
|
157 |
/*
 * Flushes every packet still sitting in q (each counts as lost) and
 * returns the queue to its "filling" state. Loss counters are kept:
 * they are only cleared on a full init (channel switch).
 */
void PacketQueueReset(PacketQueue *q)
{
	AVPacketList *curr, *victim;
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET BEGIN: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_LockMutex(q->mutex);

	/* Free every queued node; each discarded packet counts as lost. */
	for (curr = q->first_pkt; curr != NULL; ) {
		victim = curr;
		curr = curr->next;
		av_free_packet(&(victim->pkt));
		av_free(victim);
#ifdef DEBUG_QUEUE
		printf("F ");
#endif
		q->PacketHistory.LostCount++;
	}
#ifdef DEBUG_QUEUE
	printf("\n");
#endif

	QueueFillingMode = 1;
	q->last_frame_extracted = -1;

	// on queue reset do not reset loss count
	// (loss count reset is done on queue init, ie channel switch)
	q->density = 0.0;
	q->first_pkt = NULL;
	q->minpts_pkt = NULL;
	//q->last_pkt = NULL;
	q->nb_packets = 0;
	q->size = 0;
	FirstTime = 1;
	FirstTimeAudio = 1;

	/* restart per-queue statistics */
	PacketQueueClearStats(q);
#ifdef DEBUG_QUEUE
	printf("QUEUE: RESET END: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
	SDL_UnlockMutex(q->mutex);
}
200 |
|
201 |
/*
 * Resets all statistics attached to queue q: the textual stats line,
 * the packet-history ring and every cursor/counter.
 * Caller is expected to hold whatever lock protects q (called from
 * PacketQueueInit/PacketQueueReset).
 */
void PacketQueueClearStats(PacketQueue *q)
{
	int i;

	/* Bounded write instead of sprintf (stats_message is 255 bytes). */
	snprintf(q->stats_message, sizeof(q->stats_message), "%s", "\n");

	/* Wipe the whole history ring, then set the sentinel fields. */
	memset((void*)q->PacketHistory.History, 0, sizeof(SHistoryElement)*QUEUE_HISTORY_SIZE);
	for(i=0; i<QUEUE_HISTORY_SIZE; i++)
	{
		q->PacketHistory.History[i].Statistics.LastIFrameDistance = -1;
		q->PacketHistory.History[i].Status = -1;
	}

	/* BUG FIX: Index was reset twice in two chained assignments; the
	 * first write was immediately overwritten. Reset each cursor once. */
	q->PacketHistory.Index = 0;
	q->PacketHistory.LogIndex = 0;
	q->PacketHistory.QoEIndex = 0;
	q->PacketHistory.LostCount = q->PacketHistory.PlayedCount = q->PacketHistory.SkipCount = 0;
}
215 |
|
216 |
/*
 * Inserts a copy of *pkt into queue q, keeping the list ordered by
 * pkt->stream_index (used throughout this player as a frame sequence
 * number). Detects a looping source file and re-tunes the channel
 * instead of inserting.
 *
 * Returns 0 on normal handling, 1 when the packet is rejected because
 * its index is not beyond the last extracted frame, -1 on allocation
 * failure. In all cases ownership of the duplicated packet data is
 * taken by this function (inserted or freed here).
 */
int ChunkerPlayerCore_PacketQueuePut(PacketQueue *q, AVPacket *pkt)
{
	short int skip = 0;
	AVPacketList *pkt1, *tmp, *prevtmp;
	int res = 0;

	/* Safety valve: a queue that grew far past the filling threshold
	 * is flushed entirely before inserting anything new. */
	if(q->nb_packets > queue_filling_threshold*QUEUE_MAX_GROW_FACTOR) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT i have TOO MANY packets %d Type=%s, RESETTING\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		PacketQueueReset(q);
	}

	//make a copy of the incoming packet
	if(av_dup_packet(pkt) < 0) {
#ifdef DEBUG_QUEUE
		printf("QUEUE: PUT in Queue cannot duplicate in packet : NPackets=%d Type=%s\n",q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
		return -1;
	}
	pkt1 = av_malloc(sizeof(AVPacketList));
	if(!pkt1) {
		av_free_packet(pkt);
		return -1;
	}
	pkt1->pkt = *pkt;
	pkt1->next = NULL;

	/* A frame index far below the last one extracted means the source
	 * file restarted from the beginning; rate-limited to one re-tune
	 * every 10 seconds. */
	static time_t last_auto_switch = 0;

	if(
		(pkt->stream_index < last_video_frame_extracted)
		&& (pkt->stream_index <= RESTART_FRAME_NUMBER_THRESHOLD)
		&& ((time(NULL) - last_auto_switch) > 10)
	)
	{
		printf("file streaming loop detected => re-tune channel and start grabbing statistics\n");
		last_auto_switch = time(NULL);
		SDL_LockMutex(q->mutex);
		ReTune(&(Channels[SelectedChannel]));
		SDL_UnlockMutex(q->mutex);
		/* BUG FIX: the freshly allocated node (and its duplicated
		 * payload) was leaked on this path. */
		av_free_packet(&pkt1->pkt);
		av_free(pkt1);
	}
	else
	{
		SDL_LockMutex(q->mutex);

		// INSERTION SORT ALGORITHM
		// before inserting pkt, check if pkt.stream_index is <= current_extracted_frame.
		if(pkt->stream_index > q->last_frame_extracted)
		{
			if(!q->first_pkt) {
				/* empty queue: new node is both head and tail */
				q->first_pkt = pkt1;
				q->last_pkt = pkt1;
			}
			else if(pkt->stream_index < q->first_pkt->pkt.stream_index) {
				//the packet that has arrived is earlier than the first we got some time ago!
				//we need to put it at the head of the queue
				pkt1->next = q->first_pkt;
				q->first_pkt = pkt1;
			}
			else {
				/* walk until the first node with index >= pkt's;
				 * the loop runs at least once, so prevtmp is set */
				tmp = q->first_pkt;
				while(tmp->pkt.stream_index < pkt->stream_index) {
					prevtmp = tmp;
					tmp = tmp->next;
					if(!tmp) {
						break;
					}
				}
				if(tmp && tmp->pkt.stream_index == pkt->stream_index) {
					//we already have a frame with that index
					skip = 1;
#ifdef DEBUG_QUEUE
					printf("%s QUEUE: PUT: we already have frame with index %d, skipping\n", ((q->queueType == AUDIO) ? "AUDIO" : "VIDEO"), pkt->stream_index);
#endif
				}
				else {
					prevtmp->next = pkt1;
					pkt1->next = tmp;
					if(pkt1->next == NULL)
						q->last_pkt = pkt1;
				}
			}
			if(skip == 0) {
				q->nb_packets++;
				q->size += pkt1->pkt.size;
				if(q->nb_packets>=queue_filling_threshold && QueueFillingMode) // && q->queueType==AUDIO)
				{
					QueueFillingMode=0;
#ifdef DEBUG_QUEUE
					printf("QUEUE: PUT: FillingMode set to zero\n");
#endif
				}
				//keep track of the packet with the minimum pts
				if (!q->minpts_pkt || (pkt1->pkt.pts < q->minpts_pkt->pts)) {
					q->minpts_pkt = &(pkt1->pkt);
				}
			}
			else {
				/* BUG FIX: duplicate-index packets were leaked here;
				 * free them like the "too old" branch below does. */
				av_free_packet(&pkt1->pkt);
				av_free(pkt1);
			}
		}
		else {
			av_free_packet(&pkt1->pkt);
			av_free(pkt1);
#ifdef DEBUG_QUEUE
			printf("QUEUE: PUT: NOT inserting because index %d <= last extracted %d\n", pkt->stream_index, q->last_frame_extracted);
#endif
			res = 1;
		}
		SDL_UnlockMutex(q->mutex);
	}

	return res;
}
336 |
|
337 |
int OpenACodec (char *audio_codec, int sample_rate, short int audio_channels) |
338 |
{ |
339 |
AVCodec *aCodec; |
340 |
|
341 |
aCodecCtx = avcodec_alloc_context(); |
342 |
//aCodecCtx->bit_rate = 64000;
|
343 |
aCodecCtx->sample_rate = sample_rate; |
344 |
aCodecCtx->channels = audio_channels; |
345 |
aCodec = avcodec_find_decoder_by_name(audio_codec); |
346 |
if(!aCodec) {
|
347 |
printf("Codec not found!\n");
|
348 |
return -1; |
349 |
} |
350 |
if(avcodec_open(aCodecCtx, aCodec)<0) { |
351 |
fprintf(stderr, "could not open codec\n");
|
352 |
return -1; // Could not open codec |
353 |
} |
354 |
printf("using audio Codecid: %d ",aCodecCtx->codec_id);
|
355 |
printf("samplerate: %d ",aCodecCtx->sample_rate);
|
356 |
printf("channels: %d\n",aCodecCtx->channels);
|
357 |
|
358 |
return 1; |
359 |
} |
360 |
|
361 |
/*
 * (Re)opens the SDL audio device to match the decoder's sample rate
 * and channel count, then publishes the device parameters into the
 * CurrentAudio* / dimAudioQ / deltaAudioQ globals.
 * If the device is already open with the same freq/channels, it is
 * left untouched.
 * Returns 1 on success, -1 on failure.
 *
 * BUG FIX: the previous version malloc'd the spec on every call and a
 * new "old spec" on every reinit, leaking both; plain stack/static
 * storage is used instead (interface unchanged).
 */
int OpenAudio(AVCodecContext *aCodecCtx)
{
	SDL_AudioSpec wanted_spec;
	static SDL_AudioSpec wanted_spec_old;
	static int have_old_spec = 0;

	memset(&wanted_spec, 0, sizeof(wanted_spec));
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = AudioCallback;
	wanted_spec.userdata = aCodecCtx;

#ifdef DEBUG_AUDIO
	printf("wanted freq:%d\n",wanted_spec.freq);
	printf("wanted format:%d\n",wanted_spec.format);
	printf("wanted channels:%d\n",wanted_spec.channels);
	printf("wanted silence:%d\n",wanted_spec.silence);
	printf("wanted samples:%d\n",wanted_spec.samples);
#endif

	//do not reinit audio if the wanted specification is the same as before
	if (have_old_spec &&
		(wanted_spec.freq == wanted_spec_old.freq) &&
		(wanted_spec.channels == wanted_spec_old.channels)) {
		return 1;
	}

	if (have_old_spec) {
		SDL_CloseAudio();
	}

	/* remember what we are about to open for the next comparison */
	wanted_spec_old = wanted_spec;
	have_old_spec = 1;

	if (SDL_OpenAudio(&wanted_spec, NULL) < 0) {
		fprintf(stderr,"SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	/* With a NULL "obtained" spec, SDL fills the derived fields
	 * (size, silence) into wanted_spec on success. */
	CurrentAudioFreq = wanted_spec.freq;
	CurrentAudioSamples = wanted_spec.samples;
	dimAudioQ = wanted_spec.size;
	deltaAudioQ = (float)((float)wanted_spec.samples)*1000/wanted_spec.freq; //in ms
	CurrentAudioSilence = wanted_spec.silence;

#ifdef DEBUG_AUDIO
	printf("freq:%d\n",wanted_spec.freq);
	printf("format:%d\n",wanted_spec.format);
	printf("channels:%d\n",wanted_spec.channels);
	printf("silence:%d\n",wanted_spec.silence);
	printf("samples:%d\n",wanted_spec.samples);
	printf("size:%d\n",wanted_spec.size);
	printf("deltaAudioQ: %f\n",deltaAudioQ);
#endif

	return 1;
}
425 |
|
426 |
int ChunkerPlayerCore_InitAudioCodecs(char *audio_codec, int sample_rate, short int audio_channels) |
427 |
{ |
428 |
// some initializations
|
429 |
QueueStopped = 0;
|
430 |
AudioQueueOffset=0;
|
431 |
AVPlaying = 0;
|
432 |
GotSigInt = 0;
|
433 |
FirstTimeAudio=1;
|
434 |
FirstTime = 1;
|
435 |
deltaAudioQError=0;
|
436 |
|
437 |
|
438 |
if (OpenACodec(audio_codec, sample_rate, audio_channels) < 0) { |
439 |
return -1; |
440 |
} |
441 |
|
442 |
if (OpenAudio(aCodecCtx) < 1) { |
443 |
return -1; |
444 |
} |
445 |
|
446 |
outbuf_audio = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
447 |
|
448 |
//initialize the audio queue
|
449 |
PacketQueueInit(&audioq, AUDIO); |
450 |
|
451 |
// Init audio buffers
|
452 |
av_init_packet(&AudioPkt); |
453 |
//printf("AVCODEC_MAX_AUDIO_FRAME_SIZE=%d\n", AVCODEC_MAX_AUDIO_FRAME_SIZE);
|
454 |
AudioPkt.data=(uint8_t *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
455 |
if(!AudioPkt.data) return -1; |
456 |
|
457 |
return 0; |
458 |
} |
459 |
|
460 |
int ChunkerPlayerCore_InitVideoCodecs(char *v_codec, int width, int height) |
461 |
{ |
462 |
|
463 |
memset(&VideoCallbackThreadParams, 0, sizeof(ThreadVal)); |
464 |
|
465 |
VideoCallbackThreadParams.width = width; |
466 |
VideoCallbackThreadParams.height = height; |
467 |
VideoCallbackThreadParams.video_codec = strdup(v_codec); |
468 |
|
469 |
//initialize the video queue
|
470 |
PacketQueueInit(&videoq, VIDEO); |
471 |
|
472 |
// Init video buffers
|
473 |
av_init_packet(&VideoPkt); |
474 |
|
475 |
VideoPkt.data=(uint8_t *)malloc(width*height*3/2); |
476 |
if(!VideoPkt.data) return -1; |
477 |
|
478 |
return 0; |
479 |
} |
480 |
|
481 |
int ChunkerPlayerCore_InitCodecs(char *v_codec, int width, int height, char *audio_codec, int sample_rate, short int audio_channels) |
482 |
{ |
483 |
char audio_stats[255], video_stats[255]; |
484 |
|
485 |
// Register all formats and codecs
|
486 |
av_log_set_level(AV_LOG_WARNING); |
487 |
avcodec_init(); |
488 |
av_register_all(); |
489 |
|
490 |
if (ChunkerPlayerCore_InitAudioCodecs(audio_codec, sample_rate, audio_channels) < 0) { |
491 |
return -1; |
492 |
} |
493 |
|
494 |
if (ChunkerPlayerCore_InitVideoCodecs(v_codec, width, height) < 0) { |
495 |
return -1; |
496 |
} |
497 |
|
498 |
av_log_set_level(AV_LOG_FATAL); |
499 |
|
500 |
sprintf(audio_stats, "waiting for incoming audio packets...");
|
501 |
sprintf(video_stats, "waiting for incoming video packets...");
|
502 |
ChunkerPlayerGUI_SetStatsText(audio_stats, video_stats,qoe_led ? LED_GREEN : LED_NONE); |
503 |
} |
504 |
|
505 |
/*
 * Decodes the compressed audio held in *pkt IN PLACE while the packet
 * still sits in queue q: the encoded bytes are replaced by the decoded
 * PCM samples and q->size is adjusted accordingly. The original
 * compressed size is reported through *size.
 *
 * pkt->convergence_duration is repurposed as a "decoded" flag: it is
 * set to -1 here so the packet is never decoded twice (see
 * SeekAndDecodePacketStartingFrom, which tests it against 0).
 *
 * Returns 1 on success, 0 if decoding or any allocation failed.
 */
int DecodeEnqueuedAudio(AVPacket *pkt, PacketQueue *q, int* size)
{
	uint16_t *audio_bufQ = NULL;
	int16_t *dataQ = NULL;
	int data_sizeQ = AVCODEC_MAX_AUDIO_FRAME_SIZE;
	int lenQ;
	int ret = 0;

	//set the flag to decoded anyway, even if decoding fails below
	pkt->convergence_duration = -1;

	/* scratch buffer for the decoder output */
	audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if(audio_bufQ) {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: about to decode packet %d, size %d, data %d\n", pkt->stream_index, pkt->size, pkt->data);
#endif
		//decode the packet data
		lenQ = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_bufQ, &data_sizeQ, pkt);
		if(lenQ > 0) {
			dataQ = (int16_t *)av_malloc(data_sizeQ); //this will be free later at the time of playback
			if(dataQ) {
				memcpy(dataQ, audio_bufQ, data_sizeQ);
				if(pkt->data != NULL)
				{
					//discard the old encoded bytes
					av_free(pkt->data);
				}
				//subtract them from queue size
				q->size -= pkt->size;
				*size = pkt->size;
				/* swap the payload: packet now carries decoded PCM */
				pkt->data = (uint8_t *)dataQ;
				pkt->size = data_sizeQ;
				//add new size to queue size
				q->size += pkt->size;
				ret = 1;
			}
			else {
#ifdef DEBUG_AUDIO_BUFFER
				printf("AUDIO_BUFFER: cannot alloc space for decoded packet %d\n", pkt->stream_index);
#endif
			}
		}
		else {
#ifdef DEBUG_AUDIO_BUFFER
			printf("AUDIO_BUFFER: cannot decode packet %d\n", pkt->stream_index);
#endif
		}
		av_free(audio_bufQ);
	}
	else {
#ifdef DEBUG_AUDIO_BUFFER
		printf("AUDIO_BUFFER: cannot alloc decode buffer for packet %d\n", pkt->stream_index);
#endif
	}
	return ret; // 1 on success, 0 if problems occurred
}
561 |
|
562 |
/**
|
563 |
* removes a packet from the list and returns the next
|
564 |
* */
|
565 |
/**
 * Unlinks node p from queue q, frees it (packet payload included) and
 * returns the node that followed it. Updates the queue's head, packet
 * count, byte size and minimum-pts bookkeeping.
 * Caller must hold q->mutex.
 * NOTE(review): q->last_pkt is not updated when the tail is removed —
 * it may dangle afterwards; verify against the uses of last_pkt.
 */
AVPacketList *RemoveFromQueue(PacketQueue *q, AVPacketList *p)
{
	AVPacketList *p1;
	AVPacketList *retpk = p->next;

	if (q->first_pkt == p) {
		q->first_pkt = p->next;
	}
	/* if we are deleting the minimum-pts packet, forget it so the
	 * rescan below can elect a new one */
	if (&(p->pkt) == q->minpts_pkt) {
		q->minpts_pkt = NULL;
	}

	q->nb_packets--;
	//adjust size here and not in the various cases of the dequeue
	q->size -= p->pkt.size;

	/* BUG FIX: the old guards `if(&p->pkt)` (address of a member,
	 * always true) and `if(p)` (p already dereferenced above) were
	 * dead conditions — free unconditionally. */
	av_free_packet(&p->pkt);
	av_free(p);

	//recompute the minimum-pts packet over the remaining list
	for (p1 = q->first_pkt; p1; p1 = p1->next) {
		if (!q->minpts_pkt || p1->pkt.pts < q->minpts_pkt->pts) {
			q->minpts_pkt = &(p1->pkt);
		}
	}

	return retpk;
}
597 |
|
598 |
/*
 * Starting at node p, returns the first packet that is decoded (or
 * becomes decoded here). Packets that fail to decode are removed from
 * the queue and the scan continues with their successor.
 * Returns NULL when the list is exhausted.
 */
AVPacketList *SeekAndDecodePacketStartingFrom(AVPacketList *p, PacketQueue *q, int* size)
{
	while (p != NULL) {
		/* convergence_duration != 0 marks an already-decoded packet
		 * (flag set by DecodeEnqueuedAudio) */
		if (p->pkt.convergence_duration != 0) {
			return p;
		}
		/* not decoded yet: attempt decoding now */
		if (DecodeEnqueuedAudio(&(p->pkt), q, size)) {
			return p;
		}
		/* undecodable packet: drop it, move on to the next one */
		p = RemoveFromQueue(q, p);
	}
	return NULL;
}
616 |
|
617 |
/*
 * Dequeues data from q into *pkt.
 *
 * av==1 (audio): fills pkt->data with up to dimAudioQ bytes of decoded
 * PCM, concatenating and consuming as many queued packets as needed;
 * a partially-consumed packet stays in the queue and the global
 * AudioQueueOffset remembers how far into it we are.
 * av!=1 (video): copies the first queued packet out whole.
 *
 * *size receives the compressed size reported by the audio decode path.
 * Returns 1 when something was extracted, -1 otherwise (also while the
 * queue is refilling or stopped).
 */
int PacketQueueGet(PacketQueue *q, AVPacket *pkt, short int av, int* size)
{
	//AVPacket tmp;
	AVPacketList *pkt1 = NULL;
	int ret=-1;
	int SizeToCopy=0;
	int reqsize;

	SDL_LockMutex(q->mutex);

#ifdef DEBUG_QUEUE
	printf("QUEUE: Get NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif

	/* nothing is handed out while buffering or paused */
	if((q->queueType==AUDIO && QueueFillingMode) || QueueStopped)
	{
		SDL_UnlockMutex(q->mutex);
		return -1;
	}

	if(av==1) { //somebody requested an audio packet, q is the audio queue
		reqsize = dimAudioQ; //TODO pass this as parameter, not garanteed by SDL to be exactly dimAudioQ
		pkt->size = 0;
		pkt->dts = 0;
		pkt->pts = 0;
		//try to dequeue the first packet of the audio queue
		pkt1 = q->first_pkt;
		/* accumulate decoded packets until the requested size is met */
		while (pkt->size < reqsize && pkt1 && SeekAndDecodePacketStartingFrom(pkt1, q, size)) {
			AVPacketList *next = pkt1->next; //save it here since we could delete pkt1 later
			/* timestamps of the output come from the first contributing packet */
			if (!pkt->dts) pkt->dts = pkt1->pkt.dts;
			if (!pkt->pts) pkt->pts = pkt1->pkt.pts;
			pkt->stream_index = pkt1->pkt.stream_index;
			pkt->flags = 1;
			pkt->pos = -1;
			pkt->convergence_duration = -1;
			if (pkt1->pkt.size - AudioQueueOffset <= reqsize - pkt->size) { //we need the whole packet
				SizeToCopy = pkt1->pkt.size - AudioQueueOffset; //packet might be partial
				memcpy(pkt->data + pkt->size, pkt1->pkt.data + AudioQueueOffset, SizeToCopy);
				pkt->size += SizeToCopy;
				AudioQueueOffset = 0;
				RemoveFromQueue(q, pkt1);
			} else {
				SizeToCopy = reqsize - pkt->size; //partial packet remains
				memcpy(pkt->data + pkt->size, pkt1->pkt.data + AudioQueueOffset, SizeToCopy);
				pkt->size += SizeToCopy;
				AudioQueueOffset += SizeToCopy;
				/* advance the leftover packet's timestamps by the
				 * milliseconds' worth of samples just consumed */
				pkt1->pkt.dts += SizeToCopy/(dimAudioQ/CurrentAudioSamples)/(CurrentAudioFreq/1000);
				pkt1->pkt.pts += SizeToCopy/(dimAudioQ/CurrentAudioSamples)/(CurrentAudioFreq/1000);
			}

#ifdef DEBUG_AUDIO_BUFFER
			printf("2: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif

			//update index of last frame extracted
			//ChunkerPlayerStats_UpdateAudioLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);
			q->last_frame_extracted = pkt->stream_index;

			pkt1 = next;
		}
		ret = 1; //TODO: check some conditions
	} else { //somebody requested a video packet, q is the video queue
		pkt1 = q->first_pkt;
		if(pkt1) {
#ifdef DEBUG_QUEUE_DEEP
			printf(" AV not 1\n");
#endif
			/* field-by-field copy of the head packet's metadata */
			pkt->size = pkt1->pkt.size;
			pkt->dts = pkt1->pkt.dts;
			pkt->pts = pkt1->pkt.pts;
			pkt->stream_index = pkt1->pkt.stream_index;
			pkt->flags = pkt1->pkt.flags;
			pkt->pos = pkt1->pkt.pos;
			pkt->convergence_duration = pkt1->pkt.convergence_duration;
			//*pkt = pkt1->pkt;

			/* caller supplies pkt->data; payload is copied, not shared */
			if((pkt->data != NULL) && (pkt1->pkt.data != NULL))
				memcpy(pkt->data, pkt1->pkt.data, pkt1->pkt.size);

			//HINT SEE BEFORE q->size -= pkt1->pkt.size;
			RemoveFromQueue(q, pkt1);

			ret = 1;

			ChunkerPlayerStats_UpdateVideoLossHistory(&(q->PacketHistory), pkt->stream_index, q->last_frame_extracted);

			//update index of last frame extracted
			q->last_frame_extracted = pkt->stream_index;
			last_video_frame_extracted = q->last_frame_extracted;
		}
#ifdef DEBUG_QUEUE
		else {
			printf(" VIDEO pk1 NULL!!!!\n");
		}
#endif
	}

	/* audio queue drained: go back to buffering mode */
	if(q->nb_packets==0 && q->queueType==AUDIO) {
		QueueFillingMode=1;
#ifdef DEBUG_QUEUE
		printf("QUEUE: Get FillingMode ON\n");
#endif
	}
#ifdef DEBUG_QUEUE
	printf("QUEUE: Get Last %s Frame Extracted = %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif

	SDL_UnlockMutex(q->mutex);
	return ret;
}
727 |
|
728 |
/*
 * Pulls the next chunk of decoded audio into audio_buf, synchronizing
 * against wall-clock time (SDL ticks): packets that are already too
 * late are skipped, a packet inside the play window (between now and
 * ~4*deltaAudioQ ahead) is copied out for playback.
 * Establishes the global DeltaTime (ticks - first pts) on the first
 * call after a (re)buffering phase.
 * Returns the number of bytes placed in audio_buf, 0 if nothing was
 * in the play window, -1 while buffering/stopped or on queue errors.
 * NOTE(review): buf_size is not used to bound the memcpy — assumes
 * audio_buf can hold a full dequeued packet; verify against callers.
 */
int AudioDecodeFrame(uint8_t *audio_buf, int buf_size) {
	//struct timeval now;
	int audio_pkt_size = 0;
	int compressed_size = 0;
	long long Now;
	short int DecodeAudio=0, SkipAudio=0;
	//int len1, data_size;

	//gettimeofday(&now,NULL);
	//Now = (now.tv_sec)*1000+now.tv_usec/1000;
	Now=(long long)SDL_GetTicks();

	if(QueueFillingMode || QueueStopped)
	{
		//SDL_LockMutex(timing_mutex);
		FirstTimeAudio=1;
		FirstTime = 1;
		//SDL_UnlockMutex(timing_mutex);
		return -1;
	}

	/* first packet after (re)buffering: anchor the clock offset */
	if((FirstTime==1 || FirstTimeAudio==1) && audioq.size>0) {
		if(audioq.first_pkt->pkt.pts>0)
		{
			//SDL_LockMutex(timing_mutex);
			DeltaTime=Now-(long long)(audioq.first_pkt->pkt.pts);
			FirstTimeAudio = 0;
			FirstTime = 0;
			//SDL_UnlockMutex(timing_mutex);
#ifdef DEBUG_AUDIO
			printf("AUDIO: audio_decode_frame - DeltaTimeAudio=%lld\n",DeltaTime);
#endif
		}
	}

#ifdef DEBUG_AUDIO
	if(audioq.first_pkt)
	{
		printf("AUDIO: audio_decode_frame - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)audioq.first_pkt->pkt.pts,(long long)audioq.first_pkt->pkt.pts+DeltaTime);
		printf("AUDIO: QueueLen=%d ",(int)audioq.nb_packets);
		printf("AUDIO: QueueSize=%d\n",(int)audioq.size);
	}
	else
		printf("AUDIO: audio_decode_frame - Empty queue\n");
#endif

	/* classify the head packet: too late (skip) or in window (decode) */
	if(audioq.nb_packets>0)
	{
		if((double)audioq.first_pkt->pkt.pts+DeltaTime<Now+deltaAudioQ) //too late ... TODO: figure out the right number
		{
			SkipAudio = 1;
			DecodeAudio = 0;
		}
		else if((double)audioq.first_pkt->pkt.pts+DeltaTime>=Now+deltaAudioQ && //TODO: figure out the right number
			(double)audioq.first_pkt->pkt.pts+DeltaTime<=Now+deltaAudioQ+3*deltaAudioQ) { //TODO: how much in future? On some systems, SDL asks for more buffers in a raw
			SkipAudio = 0;
			DecodeAudio = 1;
		}
	}

	/* drop late packets one at a time, re-evaluating the new head */
	while(SkipAudio==1 && audioq.size>0)
	{
		SkipAudio = 0;
#ifdef DEBUG_AUDIO
		printf("AUDIO: skipaudio: queue size=%d\n",audioq.size);
#endif
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
		if(audioq.first_pkt)
		{
			ChunkerPlayerStats_UpdateAudioSkipHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);

			if((double)audioq.first_pkt->pkt.pts+DeltaTime<Now+deltaAudioQ) //TODO: figure out the right number
			{
				SkipAudio = 1;
				DecodeAudio = 0;
			}
			else if((double)audioq.first_pkt->pkt.pts+DeltaTime>=Now+deltaAudioQ && //TODO: figure out the right number
				(double)audioq.first_pkt->pkt.pts+DeltaTime<=Now+deltaAudioQ+3*deltaAudioQ) { //TODO: how much in future?
				SkipAudio = 0;
				DecodeAudio = 1;
			}
		}
	}
	/* head packet is inside the play window: hand it to the caller */
	if(DecodeAudio==1) {
		if(PacketQueueGet(&audioq,&AudioPkt,1, &compressed_size) < 0) {
			return -1;
		}
#ifdef DEBUG_SYNC
		fprintf(stderr, "AUDIO delay =%lld ms\n",(long long)AudioPkt.pts+DeltaTime-Now);
#endif
		memcpy(audio_buf,AudioPkt.data,AudioPkt.size);
		audio_pkt_size = AudioPkt.size;
#ifdef DEBUG_AUDIO
		printf("AUDIO: Decode audio\n");
#endif

		ChunkerPlayerStats_UpdateAudioPlayedHistory(&(audioq.PacketHistory), AudioPkt.stream_index, compressed_size);
	}

	return audio_pkt_size;
}
831 |
|
832 |
// Render a Frame to a YUV Overlay. Note that the Overlay is already bound to an SDL Surface
|
833 |
// Note that width, height would not be needed in new ffmpeg versions where this info is contained in AVFrame
|
834 |
// see: [FFmpeg-devel] [PATCH] lavc: add width and height fields to AVFrame
|
835 |
// Render a Frame to a YUV Overlay. Note that the Overlay is already bound to an SDL Surface
// Note that width, height would not be needed in new ffmpeg versions where this info is contained in AVFrame
// see: [FFmpeg-devel] [PATCH] lavc: add width and height fields to AVFrame
// tcrop/bcrop are the number of lines cropped from the top and bottom of the
// source frame before scaling to the overlay size.
// Returns 0 on success, -1 if the overlay could not be locked.
int RenderFrame2Overlay(AVFrame *pFrame, int frame_width, int frame_height, int tcrop, int bcrop, SDL_Overlay *YUVOverlay)
{
	AVPicture pict;
	unsigned int ycrop = 50; // NOTE(review): unused in this function — candidate for removal
	static struct SwsContext *img_convert_ctx = NULL; //if the function is used for more streams, this could be made part of some context passed as a parameter (to optimize performance)

	if(SDL_LockYUVOverlay(YUVOverlay) < 0) {
		return -1;
	}

	// planes 1 and 2 are deliberately swapped here: the overlay plane
	// order differs from the decoder's YUV420P layout — presumably the
	// overlay is YV12 (Y,V,U); verify against how the overlay is created
	pict.data[0] = YUVOverlay->pixels[0];
	pict.data[1] = YUVOverlay->pixels[2];
	pict.data[2] = YUVOverlay->pixels[1];

	pict.linesize[0] = YUVOverlay->pitches[0];
	pict.linesize[1] = YUVOverlay->pitches[2];
	pict.linesize[2] = YUVOverlay->pitches[1];

	// scaler maps the cropped source (height minus tcrop+bcrop) to the overlay size
	img_convert_ctx = sws_getCachedContext(img_convert_ctx, frame_width, frame_height - tcrop - bcrop, PIX_FMT_YUV420P, YUVOverlay->w, YUVOverlay->h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	if(img_convert_ctx == NULL) {
		fprintf(stderr, "Cannot initialize the conversion context!\n");
		exit(1);
	}

	// let's draw the data (*yuv[3]) on a SDL screen (*screen)
	// NOTE(review): srcSliceY is passed as -tcrop with height frame_height - bcrop,
	// apparently to skip the top-cropped lines — confirm against sws_scale semantics
	sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, -tcrop, frame_height - bcrop, pict.data, pict.linesize);
	SDL_UnlockYUVOverlay(YUVOverlay);

	return 0;
}
865 |
|
866 |
// Render a YUV Overlay to the specified Rect of the Surface. Note that the Overlay is already bound to an SDL Surface.
|
867 |
// Render a YUV Overlay to the specified Rect of the Surface. Note that the Overlay is already bound to an SDL Surface.
// Returns 0 on success, -1 if the surface lock could not be acquired.
int RenderOverlay2Rect(SDL_Overlay *YUVOverlay, SDL_Rect *Rect)
{
	/* Some surfaces must be locked before direct access. */
	if(SDL_MUSTLOCK(MainScreen)) {
		if(SDL_LockSurface(MainScreen) < 0) {
			return -1;
		}
	}

	/* Display under the shared overlay mutex. */
	SDL_LockMutex(OverlayMutex);
	SDL_DisplayYUVOverlay(YUVOverlay, Rect);
	SDL_UnlockMutex(OverlayMutex);

	/* Release the surface lock when it was taken. */
	if(SDL_MUSTLOCK(MainScreen)) {
		SDL_UnlockSurface(MainScreen);
	}

	return 0;
}
889 |
|
890 |
|
891 |
int VideoCallback(void *valthread) |
892 |
{ |
893 |
//AVPacket pktvideo;
|
894 |
AVCodecContext *pCodecCtx; |
895 |
AVCodec *pCodec; |
896 |
AVFrame *pFrame; |
897 |
int frameFinished;
|
898 |
long long Now; |
899 |
long long Last = 0; |
900 |
short int SkipVideo, DecodeVideo; |
901 |
uint64_t last_pts = 0;
|
902 |
long long decode_delay = 0; |
903 |
int queue_size_checked = 0; |
904 |
|
905 |
#ifdef SAVE_YUV
|
906 |
static AVFrame* lastSavedFrameBuffer = NULL; |
907 |
|
908 |
if(!lastSavedFrameBuffer)
|
909 |
lastSavedFrameBuffer = (AVFrame*) malloc(sizeof(AVFrame));
|
910 |
#endif
|
911 |
|
912 |
//double frame_rate = 0.0,time_between_frames=0.0;
|
913 |
//struct timeval now;
|
914 |
|
915 |
//int wait_for_sync = 1;
|
916 |
ThreadVal *tval; |
917 |
tval = (ThreadVal *)valthread; |
918 |
|
919 |
//frame_rate = tval->framerate;
|
920 |
//time_between_frames = 1.e6 / frame_rate;
|
921 |
//gettimeofday(&time_now,0);
|
922 |
|
923 |
//frecon = fopen("recondechunk.mpg","wb");
|
924 |
|
925 |
//setup video decoder
|
926 |
pCodec = avcodec_find_decoder_by_name(tval->video_codec); |
927 |
if (pCodec) {
|
928 |
fprintf(stderr, "INIT: Setting VIDEO codecID to: %d\n",pCodec->id);
|
929 |
} else {
|
930 |
fprintf(stderr, "INIT: Unknown VIDEO codec: %s!\n", tval->video_codec);
|
931 |
return -1; // Codec not found |
932 |
} |
933 |
|
934 |
pCodecCtx=avcodec_alloc_context(); |
935 |
pCodecCtx->codec_type = CODEC_TYPE_VIDEO; |
936 |
//pCodecCtx->debug = FF_DEBUG_DCT_COEFF;
|
937 |
pCodecCtx->codec_id = pCodec->id; |
938 |
|
939 |
//pCodecCtx->bit_rate = 400000;
|
940 |
// resolution must be a multiple of two
|
941 |
pCodecCtx->width = tval->width;//176;//352;
|
942 |
pCodecCtx->height = tval->height;//144;//288;
|
943 |
|
944 |
// frames per second
|
945 |
//pCodecCtx->time_base = (AVRational){1,25};
|
946 |
//pCodecCtx->gop_size = 10; // emit one intra frame every ten frames
|
947 |
//pCodecCtx->max_b_frames=1;
|
948 |
pCodecCtx->pix_fmt = PIX_FMT_YUV420P; |
949 |
pCodec=avcodec_find_decoder(pCodecCtx->codec_id); |
950 |
|
951 |
if(pCodec==NULL) { |
952 |
fprintf(stderr, "Unsupported codec!\n");
|
953 |
return -1; // Codec not found |
954 |
} |
955 |
if(avcodec_open(pCodecCtx, pCodec) < 0) { |
956 |
fprintf(stderr, "could not open codec\n");
|
957 |
return -1; // Could not open codec |
958 |
} |
959 |
pFrame=avcodec_alloc_frame(); |
960 |
if(pFrame==NULL) { |
961 |
printf("Memory error!!!\n");
|
962 |
return -1; |
963 |
} |
964 |
|
965 |
#ifdef DEBUG_VIDEO
|
966 |
printf("VIDEO: video_callback entering main cycle\n");
|
967 |
#endif
|
968 |
|
969 |
while(AVPlaying && !quit) {
|
970 |
|
971 |
if(QueueFillingMode || QueueStopped)
|
972 |
{ |
973 |
//SDL_LockMutex(timing_mutex);
|
974 |
FirstTime = 1;
|
975 |
//SDL_UnlockMutex(timing_mutex);
|
976 |
usleep(5000);
|
977 |
continue;
|
978 |
} |
979 |
|
980 |
DecodeVideo = 0;
|
981 |
SkipVideo = 0;
|
982 |
Now=(long long)SDL_GetTicks(); |
983 |
if(FirstTime==1 && videoq.size>0) { |
984 |
if(videoq.first_pkt->pkt.pts>0) |
985 |
{ |
986 |
//SDL_LockMutex(timing_mutex);
|
987 |
DeltaTime=Now-(long long)videoq.first_pkt->pkt.pts; |
988 |
FirstTime = 0;
|
989 |
FirstTimeAudio = 0;
|
990 |
//SDL_UnlockMutex(timing_mutex);
|
991 |
} |
992 |
#ifdef DEBUG_VIDEO
|
993 |
printf("VIDEO: VideoCallback - DeltaTimeAudio=%lld\n",DeltaTime);
|
994 |
#endif
|
995 |
} |
996 |
|
997 |
#ifdef DEBUG_VIDEO
|
998 |
if(videoq.first_pkt)
|
999 |
{ |
1000 |
printf("VIDEO: VideoCallback - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)videoq.first_pkt->pkt.pts,(long long)videoq.first_pkt->pkt.pts+DeltaTime); |
1001 |
printf("VIDEO: Index=%d ", (int)videoq.first_pkt->pkt.stream_index); |
1002 |
printf("VIDEO: QueueLen=%d ", (int)videoq.nb_packets); |
1003 |
printf("VIDEO: QueueSize=%d\n", (int)videoq.size); |
1004 |
} |
1005 |
else
|
1006 |
printf("VIDEO: VideoCallback - Empty queue\n");
|
1007 |
#endif
|
1008 |
|
1009 |
#ifdef DEBUG_VIDEO
|
1010 |
printf("VIDEO: skipvideo:%d decodevideo:%d\n",SkipVideo,DecodeVideo);
|
1011 |
#endif
|
1012 |
// ChunkerPlayerStats_UpdateVideoSkipHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame);
|
1013 |
|
1014 |
if(videoq.nb_packets>0) { |
1015 |
if (!queue_size_checked && videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts < decode_delay) { //queue too short |
1016 |
#ifdef DEBUG_SYNC
|
1017 |
fprintf(stderr, "VIDEO queue too short,diff(%lld) < decode_delay(%lld), increasing delta from \n",videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts, decode_delay, DeltaTime);
|
1018 |
#endif
|
1019 |
DeltaTime += decode_delay - (videoq.last_pkt->pkt.pts - videoq.first_pkt->pkt.pts); |
1020 |
queue_size_checked = 1; //make sure we do not increase the delay several times bacause of the same frame |
1021 |
} |
1022 |
if (videoq.first_pkt->pkt.pts + DeltaTime - Now < decode_delay) { //time to decode, should be based on DTS |
1023 |
if (PacketQueueGet(&videoq,&VideoPkt,0, NULL) > 0) { |
1024 |
queue_size_checked = 0;
|
1025 |
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt); |
1026 |
#ifdef DEBUG_SYNC
|
1027 |
fprintf(stderr, "VIDEO delta =%lld ms; dt=%lld \n",(long long) pFrame->pkt_pts - last_pts, Now - Last); |
1028 |
#endif
|
1029 |
last_pts = pFrame->pkt_pts; |
1030 |
if (pFrame->pkt_pts) decode_delay = MAX(decode_delay, VideoPkt.pts - pFrame->pkt_pts); //TODO: base this on dts |
1031 |
decode_delay = MIN(decode_delay, 40 * 5); //TODO, this workaround would not be needed if decode_delay would be based on DTS |
1032 |
#ifdef DEBUG_SYNC
|
1033 |
fprintf(stderr, "VIDEO t=%lld ms ptsin=%lld ptsout=%lld \n",Now, (long long)VideoPkt.pts+DeltaTime, pFrame->pkt_pts+DeltaTime); |
1034 |
fprintf(stderr, "VIDEO delay =%lld ms ; %lld ms \n",(long long)VideoPkt.pts+DeltaTime-Now, pFrame->pkt_pts+DeltaTime-Now); |
1035 |
#endif
|
1036 |
|
1037 |
if(frameFinished)
|
1038 |
{ // it must be true all the time else error
|
1039 |
|
1040 |
long long target_pts = pFrame->pkt_pts + DeltaTime; |
1041 |
long long earlier = target_pts - Now; |
1042 |
|
1043 |
#ifdef DEBUG_VIDEO
|
1044 |
printf("VIDEO: FrameFinished\n");
|
1045 |
#endif
|
1046 |
decoded_vframes++; |
1047 |
|
1048 |
|
1049 |
#ifdef VIDEO_DEINTERLACE
|
1050 |
avpicture_deinterlace( |
1051 |
(AVPicture*) pFrame, |
1052 |
(const AVPicture*) pFrame,
|
1053 |
pCodecCtx->pix_fmt, |
1054 |
tval->width, tval->height); |
1055 |
#endif
|
1056 |
|
1057 |
#ifdef SAVE_YUV
|
1058 |
if(LastSavedVFrame == -1) |
1059 |
{ |
1060 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1061 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1062 |
LastSavedVFrame = VideoPkt.stream_index; |
1063 |
} |
1064 |
else if(LastSavedVFrame == (VideoPkt.stream_index-1)) |
1065 |
{ |
1066 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1067 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1068 |
LastSavedVFrame = VideoPkt.stream_index; |
1069 |
} |
1070 |
else if(LastSavedVFrame >= 0) |
1071 |
{ |
1072 |
while(LastSavedVFrame < (VideoPkt.stream_index-1)) |
1073 |
{ |
1074 |
SaveFrame(lastSavedFrameBuffer, pCodecCtx->width, pCodecCtx->height); |
1075 |
} |
1076 |
|
1077 |
memcpy(lastSavedFrameBuffer, pFrame, sizeof(AVFrame));
|
1078 |
SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height); |
1079 |
LastSavedVFrame = VideoPkt.stream_index; |
1080 |
} |
1081 |
#endif
|
1082 |
ChunkerPlayerStats_UpdateVideoPlayedHistory(&(videoq.PacketHistory), VideoPkt.stream_index, pFrame->pict_type, VideoPkt.size, pFrame); |
1083 |
|
1084 |
if(SilentMode)
|
1085 |
continue;
|
1086 |
|
1087 |
SDL_LockMutex(OverlayMutex); |
1088 |
if (RenderFrame2Overlay(pFrame, pCodecCtx->width, pCodecCtx->height, 0, 0, YUVOverlay) < 0){ |
1089 |
SDL_UnlockMutex(OverlayMutex); |
1090 |
continue;
|
1091 |
} |
1092 |
|
1093 |
//wait for the playback time
|
1094 |
#ifdef DEBUG_SYNC
|
1095 |
fprintf(stderr, "VIDEO earlier =%lld ms\n",earlier);
|
1096 |
#endif
|
1097 |
if (earlier > 0) { |
1098 |
usleep(MIN(earlier,1000) * 1000); |
1099 |
// } else if (earlier < 0) {
|
1100 |
// fprintf(stderr, "should increase delay2 : pFrame->pkt_pts=%lld, DeltaTime=%lld, Now=%lld, earlier=%lld\n", pFrame->pkt_pts, DeltaTime, Now, earlier);
|
1101 |
// DeltaTime -= earlier;
|
1102 |
} |
1103 |
|
1104 |
Last = Now; |
1105 |
|
1106 |
if (RenderOverlay2Rect(YUVOverlay, ChunkerPlayerGUI_GetMainOverlayRect()) < 0) { |
1107 |
SDL_UnlockMutex(OverlayMutex); |
1108 |
continue;
|
1109 |
} |
1110 |
SDL_UnlockMutex(OverlayMutex); |
1111 |
|
1112 |
//redisplay logo
|
1113 |
/**SDL_BlitSurface(image, NULL, MainScreen, &dest);*/
|
1114 |
/* Update the screen area just changed */
|
1115 |
/**SDL_UpdateRects(MainScreen, 1, &dest);*/
|
1116 |
} //if FrameFinished
|
1117 |
else
|
1118 |
{ |
1119 |
ChunkerPlayerStats_UpdateVideoLossHistory(&(videoq.PacketHistory), VideoPkt.stream_index+1, videoq.last_frame_extracted-1); |
1120 |
} |
1121 |
} |
1122 |
} |
1123 |
usleep(5000);
|
1124 |
} |
1125 |
usleep(5000);
|
1126 |
} |
1127 |
avcodec_close(pCodecCtx); |
1128 |
av_free(pCodecCtx); |
1129 |
av_free(pFrame); |
1130 |
//fclose(frecon);
|
1131 |
#ifdef DEBUG_VIDEO
|
1132 |
printf("VIDEO: video callback end\n");
|
1133 |
#endif
|
1134 |
|
1135 |
#ifdef SAVE_YUV
|
1136 |
if(!lastSavedFrameBuffer)
|
1137 |
free(lastSavedFrameBuffer); |
1138 |
|
1139 |
lastSavedFrameBuffer = NULL;
|
1140 |
#endif
|
1141 |
|
1142 |
return 0; |
1143 |
} |
1144 |
|
1145 |
/*
 * SDL audio callback: fill `stream` (len bytes) with the next decoded
 * audio frame, or with silence when the decoder did not produce exactly
 * `len` bytes. When SilentMode >= 2 the output buffer is left untouched.
 */
void AudioCallback(void *userdata, Uint8 *stream, int len)
{
	static uint8_t decode_buf[AVCODEC_MAX_AUDIO_FRAME_SIZE];
	int decoded;

	/* pre-fill with silence, then attempt to decode one frame */
	memset(decode_buf, CurrentAudioSilence, sizeof(decode_buf));
	decoded = AudioDecodeFrame(decode_buf, sizeof(decode_buf));

	if (SilentMode >= 2)
		return;

	if (decoded == len)
		memcpy(stream, (uint8_t *)decode_buf, len);
	else
		memset(stream, CurrentAudioSilence, len);
}
1164 |
/*
 * Append one decoded frame as raw planar YUV 4:2:0 to the file named by
 * the global YUVFileName. Silently returns if the file cannot be opened.
 */
void SaveFrame(AVFrame *pFrame, int width, int height)
{
	FILE *out;
	int row;

	out = fopen(YUVFileName, "ab");
	if (out == NULL)
		return;

	/* luma plane: full resolution */
	for (row = 0; row < height; row++)
		fwrite(pFrame->data[0] + row * pFrame->linesize[0], 1, width, out);

	/* chroma planes: half resolution in both dimensions */
	for (row = 0; row < height / 2; row++)
		fwrite(pFrame->data[1] + row * pFrame->linesize[1], 1, width / 2, out);
	for (row = 0; row < height / 2; row++)
		fwrite(pFrame->data[2] + row * pFrame->linesize[2], 1, width / 2, out);

	fclose(out);
}
1191 |
int ChunkerPlayerCore_IsRunning()
|
1192 |
{ |
1193 |
return AVPlaying;
|
1194 |
} |
1195 |
|
1196 |
void ChunkerPlayerCore_Play()
|
1197 |
{ |
1198 |
if(AVPlaying) return; |
1199 |
AVPlaying = 1;
|
1200 |
|
1201 |
SDL_PauseAudio(0);
|
1202 |
video_thread = SDL_CreateThread(VideoCallback, &VideoCallbackThreadParams); |
1203 |
ChunkerPlayerStats_Init(&VideoCallbackThreadParams); |
1204 |
stats_thread = SDL_CreateThread(CollectStatisticsThread, NULL);
|
1205 |
|
1206 |
decoded_vframes = 0;
|
1207 |
LastSavedVFrame = -1;
|
1208 |
} |
1209 |
|
1210 |
void ChunkerPlayerCore_Stop()
|
1211 |
{ |
1212 |
if(!AVPlaying) return; |
1213 |
|
1214 |
AVPlaying = 0;
|
1215 |
|
1216 |
// Stop audio&video playback
|
1217 |
SDL_WaitThread(video_thread, NULL);
|
1218 |
SDL_WaitThread(stats_thread, NULL);
|
1219 |
SDL_PauseAudio(1);
|
1220 |
|
1221 |
if(YUVOverlay != NULL) |
1222 |
{ |
1223 |
SDL_FreeYUVOverlay(YUVOverlay); |
1224 |
YUVOverlay = NULL;
|
1225 |
} |
1226 |
|
1227 |
PacketQueueReset(&audioq); |
1228 |
PacketQueueReset(&videoq); |
1229 |
|
1230 |
avcodec_close(aCodecCtx); |
1231 |
av_free(aCodecCtx); |
1232 |
free(AudioPkt.data); |
1233 |
free(VideoPkt.data); |
1234 |
free(outbuf_audio); |
1235 |
|
1236 |
/*
|
1237 |
* Sleep two buffers' worth of audio before closing, in order
|
1238 |
* to allow the playback to finish. This isn't always enough;
|
1239 |
* perhaps SDL needs a way to explicitly wait for device drain?
|
1240 |
* Doesn't seem to be necessary -> disabled
|
1241 |
*/
|
1242 |
//int delay = 2 * 1000 * CurrentAudioSamples / CurrentAudioFreq;
|
1243 |
// printf("SDL_Delay(%d)\n", delay*10);
|
1244 |
//SDL_Delay(delay*10);
|
1245 |
} |
1246 |
|
1247 |
void ChunkerPlayerCore_Finalize()
|
1248 |
{ |
1249 |
if(YUVOverlay != NULL) |
1250 |
{ |
1251 |
SDL_FreeYUVOverlay(YUVOverlay); |
1252 |
YUVOverlay = NULL;
|
1253 |
} |
1254 |
|
1255 |
SDL_CloseAudio(); |
1256 |
} |
1257 |
|
1258 |
void ChunkerPlayerCore_Pause()
|
1259 |
{ |
1260 |
if(!AVPlaying) return; |
1261 |
|
1262 |
AVPlaying = 0;
|
1263 |
|
1264 |
// Stop audio&video playback
|
1265 |
SDL_WaitThread(video_thread, NULL);
|
1266 |
SDL_PauseAudio(1);
|
1267 |
|
1268 |
PacketQueueReset(&audioq); |
1269 |
PacketQueueReset(&videoq); |
1270 |
} |
1271 |
|
1272 |
int ChunkerPlayerCore_AudioEnded()
|
1273 |
{ |
1274 |
return (audioq.nb_packets==0 && audioq.last_frame_extracted>0); |
1275 |
} |
1276 |
|
1277 |
void ChunkerPlayerCore_ResetAVQueues()
|
1278 |
{ |
1279 |
#ifdef DEBUG_QUEUE
|
1280 |
printf("QUEUE: MAIN SHOULD RESET\n");
|
1281 |
#endif
|
1282 |
PacketQueueReset(&audioq); |
1283 |
PacketQueueReset(&videoq); |
1284 |
} |
1285 |
|
1286 |
int ChunkerPlayerCore_EnqueueBlocks(const uint8_t *block, const int block_size) |
1287 |
{ |
1288 |
#ifdef EMULATE_CHUNK_LOSS
|
1289 |
static time_t loss_cycle_start_time = 0, now = 0; |
1290 |
static int early_losses = 0; |
1291 |
static int clp_frames = 0; |
1292 |
|
1293 |
if(ScheduledChunkLosses)
|
1294 |
{ |
1295 |
static unsigned int random_threshold; |
1296 |
now=time(NULL);
|
1297 |
if(!loss_cycle_start_time)
|
1298 |
loss_cycle_start_time = now; |
1299 |
|
1300 |
if(((now-loss_cycle_start_time) >= ScheduledChunkLosses[((CurrChunkLossIndex+1)%NScheduledChunkLosses)].Time) && (NScheduledChunkLosses>1 || CurrChunkLossIndex==-1)) |
1301 |
{ |
1302 |
CurrChunkLossIndex = ((CurrChunkLossIndex+1)%NScheduledChunkLosses);
|
1303 |
if(CurrChunkLossIndex == (NScheduledChunkLosses-1)) |
1304 |
loss_cycle_start_time = now; |
1305 |
|
1306 |
if(ScheduledChunkLosses[CurrChunkLossIndex].Value == -1) |
1307 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].MinValue + (rand() % (ScheduledChunkLosses[CurrChunkLossIndex].MaxValue-ScheduledChunkLosses[CurrChunkLossIndex].MinValue)); |
1308 |
else
|
1309 |
random_threshold = ScheduledChunkLosses[CurrChunkLossIndex].Value; |
1310 |
|
1311 |
printf("new ScheduledChunkLoss, time: %d, value: %d\n", (int)ScheduledChunkLosses[CurrChunkLossIndex].Time, random_threshold); |
1312 |
} |
1313 |
|
1314 |
if(clp_frames > 0) |
1315 |
{ |
1316 |
clp_frames--; |
1317 |
return PLAYER_FAIL_RETURN;
|
1318 |
} |
1319 |
if((rand() % 100) < random_threshold) |
1320 |
{ |
1321 |
if(early_losses > 0) |
1322 |
early_losses--; |
1323 |
else
|
1324 |
{ |
1325 |
clp_frames=early_losses=(ScheduledChunkLosses[CurrChunkLossIndex].Burstiness-1);
|
1326 |
return PLAYER_FAIL_RETURN;
|
1327 |
} |
1328 |
} |
1329 |
} |
1330 |
#endif
|
1331 |
|
1332 |
Chunk *gchunk = NULL;
|
1333 |
int decoded_size = -1; |
1334 |
uint8_t *tempdata, *buffer; |
1335 |
int j;
|
1336 |
Frame *frame = NULL;
|
1337 |
AVPacket packet, packetaudio; |
1338 |
|
1339 |
uint16_t *audio_bufQ = NULL;
|
1340 |
|
1341 |
//the frame.h gets encoded into 5 slots of 32bits (3 ints plus 2 more for the timeval struct
|
1342 |
static int sizeFrameHeader = 5*sizeof(int32_t); |
1343 |
//the following we dont need anymore
|
1344 |
//static int ExternalChunk_header_size = 5*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 1*CHUNK_TRANSCODING_INT_SIZE*2;
|
1345 |
|
1346 |
static int chunks_out_of_order = 0; |
1347 |
static int last_chunk_id = -1; |
1348 |
|
1349 |
audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE); |
1350 |
if(!audio_bufQ) {
|
1351 |
printf("Memory error in audio_bufQ!\n");
|
1352 |
return PLAYER_FAIL_RETURN;
|
1353 |
} |
1354 |
|
1355 |
gchunk = (Chunk *)malloc(sizeof(Chunk));
|
1356 |
if(!gchunk) {
|
1357 |
printf("Memory error in gchunk!\n");
|
1358 |
av_free(audio_bufQ); |
1359 |
return PLAYER_FAIL_RETURN;
|
1360 |
} |
1361 |
|
1362 |
decoded_size = decodeChunk(gchunk, block, block_size); |
1363 |
|
1364 |
if(last_chunk_id == -1) |
1365 |
last_chunk_id = gchunk->id; |
1366 |
|
1367 |
if(gchunk->id > (last_chunk_id+1)) { |
1368 |
chunks_out_of_order += gchunk->id - last_chunk_id - 1;
|
1369 |
} |
1370 |
last_chunk_id = gchunk->id; |
1371 |
|
1372 |
#ifdef DEBUG_CHUNKER
|
1373 |
printf("CHUNKER: enqueueBlock: id %d decoded_size %d target size %d - out_of_order %d\n", gchunk->id, decoded_size, GRAPES_ENCODED_CHUNK_HEADER_SIZE + ExternalChunk_header_size + gchunk->size, chunks_out_of_order);
|
1374 |
#endif
|
1375 |
if(decoded_size < 0) { |
1376 |
//HINT here i should differentiate between various return values of the decode
|
1377 |
//in order to free what has been allocated there
|
1378 |
printf("chunk probably corrupted!\n");
|
1379 |
av_free(audio_bufQ); |
1380 |
free(gchunk); |
1381 |
return PLAYER_FAIL_RETURN;
|
1382 |
} |
1383 |
|
1384 |
frame = (Frame *)malloc(sizeof(Frame));
|
1385 |
if(!frame) {
|
1386 |
printf("Memory error in Frame!\n");
|
1387 |
if(gchunk) {
|
1388 |
if(gchunk->attributes) {
|
1389 |
free(gchunk->attributes); |
1390 |
} |
1391 |
free(gchunk); |
1392 |
} |
1393 |
av_free(audio_bufQ); |
1394 |
return PLAYER_FAIL_RETURN;
|
1395 |
} |
1396 |
|
1397 |
tempdata = gchunk->data; //let it point to first frame of payload
|
1398 |
j=gchunk->size; |
1399 |
while(j>0 && !quit) { |
1400 |
frame->number = bit32_encoded_pull(tempdata); |
1401 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1402 |
frame->timestamp.tv_sec = bit32_encoded_pull(tempdata); |
1403 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1404 |
frame->timestamp.tv_usec = bit32_encoded_pull(tempdata); |
1405 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1406 |
frame->size = bit32_encoded_pull(tempdata); |
1407 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1408 |
frame->type = bit32_encoded_pull(tempdata); |
1409 |
tempdata += CHUNK_TRANSCODING_INT_SIZE; |
1410 |
|
1411 |
buffer = tempdata; // here coded frame information
|
1412 |
tempdata += frame->size; //let it point to the next frame
|
1413 |
|
1414 |
if(frame->type < 5) { // video frame |
1415 |
av_init_packet(&packet); |
1416 |
packet.data = buffer;//video_bufQ;
|
1417 |
packet.size = frame->size; |
1418 |
packet.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1419 |
packet.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1420 |
packet.stream_index = frame->number; // use of stream_index for number frame
|
1421 |
//packet.duration = frame->timestamp.tv_sec;
|
1422 |
if(packet.size > 0) { |
1423 |
int ret = ChunkerPlayerCore_PacketQueuePut(&videoq, &packet); //the _put makes a copy of the packet |
1424 |
if (ret == 1) { //TODO: check and correct return values |
1425 |
fprintf(stderr, "late chunk received, increasing delay to %lld\n", DeltaTime);
|
1426 |
DeltaTime += 5; //TODO: handle audio skip; verify this value |
1427 |
} |
1428 |
} |
1429 |
|
1430 |
#ifdef DEBUG_SOURCE
|
1431 |
printf("SOURCE: Insert video in queue pts=%lld %d %d sindex:%d\n",packet.pts,(int)frame->timestamp.tv_sec,(int)frame->timestamp.tv_usec,packet.stream_index); |
1432 |
#endif
|
1433 |
} |
1434 |
else if(frame->type == 5) { // audio frame |
1435 |
av_init_packet(&packetaudio); |
1436 |
packetaudio.data = buffer; |
1437 |
packetaudio.size = frame->size; |
1438 |
packetaudio.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1439 |
packetaudio.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec; |
1440 |
//packetaudio.duration = frame->timestamp.tv_sec;
|
1441 |
packetaudio.stream_index = frame->number; // use of stream_index for number frame
|
1442 |
packetaudio.flags = 1;
|
1443 |
packetaudio.pos = -1;
|
1444 |
|
1445 |
//instead of -1, in order to signal it is not decoded yet
|
1446 |
packetaudio.convergence_duration = 0;
|
1447 |
|
1448 |
// insert the audio frame into the queue
|
1449 |
if(packetaudio.size > 0) { |
1450 |
int ret = ChunkerPlayerCore_PacketQueuePut(&audioq, &packetaudio);//makes a copy of the packet so i can free here |
1451 |
if (ret == 1) { //TODO: check and correct return values |
1452 |
fprintf(stderr, "late chunk received, increasing delay to %lld\n", DeltaTime);
|
1453 |
DeltaTime += 5; //TODO: handle audio skip; verify this value |
1454 |
} |
1455 |
} |
1456 |
|
1457 |
#ifdef DEBUG_SOURCE
|
1458 |
printf("SOURCE: Insert audio in queue pts=%lld sindex:%d\n", packetaudio.pts, packetaudio.stream_index);
|
1459 |
#endif
|
1460 |
} |
1461 |
else {
|
1462 |
printf("SOURCE: Unknown frame type %d. Size %d\n", frame->type, frame->size);
|
1463 |
} |
1464 |
if(frame->size > 0) |
1465 |
j = j - sizeFrameHeader - frame->size; |
1466 |
else {
|
1467 |
printf("SOURCE: Corrupt frames (size %d) in chunk. Skipping it...\n", frame->size);
|
1468 |
j = -1;
|
1469 |
} |
1470 |
} |
1471 |
//chunk ingestion terminated!
|
1472 |
if(gchunk) {
|
1473 |
if(gchunk->attributes) {
|
1474 |
free(gchunk->attributes); |
1475 |
} |
1476 |
if(gchunk->data)
|
1477 |
free(gchunk->data); |
1478 |
free(gchunk); |
1479 |
} |
1480 |
if(frame)
|
1481 |
free(frame); |
1482 |
if(audio_bufQ)
|
1483 |
av_free(audio_bufQ); |
1484 |
|
1485 |
return PLAYER_OK_RETURN;
|
1486 |
} |
1487 |
|
1488 |
void ChunkerPlayerCore_SetupOverlay(int width, int height) |
1489 |
{ |
1490 |
// if(!MainScreen && !SilentMode)
|
1491 |
// {
|
1492 |
// printf("Cannot find main screen, exiting...\n");
|
1493 |
// exit(1);
|
1494 |
// }
|
1495 |
|
1496 |
if(SilentMode)
|
1497 |
return;
|
1498 |
|
1499 |
//TODO: return with error if size is too small
|
1500 |
width = MAX(width, 8);
|
1501 |
height = MAX(height, 8);
|
1502 |
|
1503 |
SDL_LockMutex(OverlayMutex); |
1504 |
if(YUVOverlay != NULL) |
1505 |
{ |
1506 |
SDL_FreeYUVOverlay(YUVOverlay); |
1507 |
YUVOverlay = NULL;
|
1508 |
} |
1509 |
|
1510 |
// create video overlay for display of video frames
|
1511 |
// printf("SDL_CreateYUVOverlay(%d, %d, SDL_YV12_OVERLAY, MainScreen)\n", width, height);
|
1512 |
YUVOverlay = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, MainScreen); |
1513 |
if ( YUVOverlay == NULL ) |
1514 |
{ |
1515 |
fprintf(stderr,"SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
|
1516 |
exit(1);
|
1517 |
} |
1518 |
|
1519 |
if ( YUVOverlay->hw_overlay )
|
1520 |
fprintf(stderr,"SDL: Using hardware overlay.\n");
|
1521 |
|
1522 |
SDL_DisplayYUVOverlay(YUVOverlay, ChunkerPlayerGUI_GetMainOverlayRect()); |
1523 |
|
1524 |
SDL_UnlockMutex(OverlayMutex); |
1525 |
} |
1526 |
|
1527 |
int CollectStatisticsThread(void *params) |
1528 |
{ |
1529 |
struct timeval last_stats_evaluation, now, last_trace, last_qoe_evaluation;
|
1530 |
gettimeofday(&last_stats_evaluation, NULL);
|
1531 |
last_trace = last_stats_evaluation; |
1532 |
last_qoe_evaluation = last_stats_evaluation; |
1533 |
|
1534 |
double video_qdensity;
|
1535 |
double audio_qdensity;
|
1536 |
char audio_stats_text[255]; |
1537 |
char video_stats_text[255]; |
1538 |
SStats audio_statistics, video_statistics; |
1539 |
double qoe = 0; |
1540 |
int sleep_time = STATS_THREAD_GRANULARITY*1000; |
1541 |
int audio_avg_bitrate = 0; |
1542 |
int video_avg_bitrate = 0; |
1543 |
|
1544 |
while(AVPlaying && !quit)
|
1545 |
{ |
1546 |
usleep(sleep_time); |
1547 |
|
1548 |
gettimeofday(&now, NULL);
|
1549 |
|
1550 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_stats_evaluation.tv_sec*1000)+(last_stats_evaluation.tv_usec/1000))) > GUI_PRINTSTATS_INTERVAL) |
1551 |
{ |
1552 |
// estimate audio queue stats
|
1553 |
int audio_stats_changed = ChunkerPlayerStats_GetStats(&(audioq.PacketHistory), &audio_statistics);
|
1554 |
|
1555 |
// estimate video queue stats
|
1556 |
int video_stats_changed = ChunkerPlayerStats_GetStats(&(videoq.PacketHistory), &video_statistics);
|
1557 |
|
1558 |
// compute avg bitrate up to now
|
1559 |
audioq.cumulative_bitrate += audio_statistics.Bitrate; |
1560 |
audioq.cumulative_samples++; |
1561 |
audio_avg_bitrate = (int)( ((double)audioq.cumulative_bitrate) / ((double)audioq.cumulative_samples) ); |
1562 |
videoq.cumulative_bitrate += video_statistics.Bitrate; |
1563 |
videoq.cumulative_samples++; |
1564 |
video_avg_bitrate = (int)( ((double)videoq.cumulative_bitrate) / ((double)videoq.cumulative_samples) ); |
1565 |
|
1566 |
#ifdef DEBUG_STATS
|
1567 |
printf("VIDEO: %d Kbit/sec; ", video_statistics.Bitrate);
|
1568 |
printf("AUDIO: %d Kbit/sec\n", audio_statistics.Bitrate);
|
1569 |
#endif
|
1570 |
|
1571 |
// QUEUE DENSITY EVALUATION
|
1572 |
if((audioq.last_pkt != NULL) && (audioq.first_pkt != NULL)) |
1573 |
if(audioq.last_pkt->pkt.stream_index >= audioq.first_pkt->pkt.stream_index)
|
1574 |
{ |
1575 |
//plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1576 |
audio_qdensity = (double)audioq.nb_packets / (double)(audioq.last_pkt->pkt.stream_index - audioq.first_pkt->pkt.stream_index + 1) * 100.0; |
1577 |
} |
1578 |
|
1579 |
if((videoq.last_pkt != NULL) && (videoq.first_pkt != NULL)) |
1580 |
if(videoq.last_pkt->pkt.stream_index >= videoq.first_pkt->pkt.stream_index)
|
1581 |
{ |
1582 |
// plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
|
1583 |
video_qdensity = (double)videoq.nb_packets / (double)(videoq.last_pkt->pkt.stream_index - videoq.first_pkt->pkt.stream_index + 1) * 100.0; |
1584 |
} |
1585 |
|
1586 |
if(LogTraces)
|
1587 |
{ |
1588 |
ChunkerPlayerStats_PrintHistoryTrace(&(audioq.PacketHistory), AudioTraceFilename); |
1589 |
ChunkerPlayerStats_PrintHistoryTrace(&(videoq.PacketHistory), VideoTraceFilename); |
1590 |
|
1591 |
//if(SilentMode != 1 && SilentMode != 2)
|
1592 |
ChunkerPlayerStats_PrintContextFile(); |
1593 |
} |
1594 |
|
1595 |
// PRINT STATISTICS ON GUI
|
1596 |
if(!Audio_ON)
|
1597 |
sprintf(audio_stats_text, "AUDIO MUTED");
|
1598 |
else if(audio_stats_changed) |
1599 |
// sprintf(audio_stats_text, "[AUDIO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)", (int)audioq.nb_packets, (int)audio_qdensity, (int)audio_statistics.Lossrate, audioq.PacketHistory.LostCount, audio_statistics.Skiprate, audioq.PacketHistory.SkipCount);
|
1600 |
sprintf(audio_stats_text, "[AUDIO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - rate: %d kbits/sec (avg: %d)", (int)audioq.nb_packets, (int)audio_qdensity, (int)audio_statistics.Lossrate, audioq.PacketHistory.LostCount, audio_statistics.Bitrate, audio_avg_bitrate); |
1601 |
else
|
1602 |
sprintf(audio_stats_text, "waiting for incoming audio packets...");
|
1603 |
|
1604 |
if(video_stats_changed)
|
1605 |
{ |
1606 |
char est_psnr_string[255]; |
1607 |
sprintf(est_psnr_string, ".");
|
1608 |
if(qoe)
|
1609 |
{ |
1610 |
sprintf(est_psnr_string, " - Est. Mean PSNR: %.1f db", (float)qoe); |
1611 |
#ifdef PSNR_PUBLICATION
|
1612 |
// Publish measure into repository
|
1613 |
if(RepoAddress[0]!='\0') |
1614 |
{ |
1615 |
MeasurementRecord r; |
1616 |
r.originator = NetworkID; |
1617 |
r.targetA = NetworkID; |
1618 |
r.targetB = NULL;
|
1619 |
r.published_name = "PSNR_MEAN";
|
1620 |
r.value = qoe; |
1621 |
r.string_value = NULL;
|
1622 |
r.channel = Channels[SelectedChannel].Title; |
1623 |
gettimeofday(&(r.timestamp), NULL);
|
1624 |
// One update every REPO_UPDATE_INTERVALL seconds
|
1625 |
struct timeval ElapsedTime;
|
1626 |
timeval_subtract(&(r.timestamp),&LastTimeRepoPublish,&ElapsedTime); |
1627 |
if(ElapsedTime.tv_sec>=PSNR_REPO_UPDATE_INTERVALL)
|
1628 |
{ |
1629 |
LastTimeRepoPublish=r.timestamp; |
1630 |
if(repPublish(repoclient,NULL,NULL,&r)!=NULL) { |
1631 |
#ifdef DEBUG_PSNR
|
1632 |
printf("PSNR publish: %s %e %s\n",r.originator,qoe,r.channel);
|
1633 |
#endif
|
1634 |
} |
1635 |
} |
1636 |
} |
1637 |
#endif
|
1638 |
} |
1639 |
|
1640 |
// sprintf(video_stats_text, "[VIDEO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - skips: %d/sec (%ld tot)%s", (int)videoq.nb_packets, (int)video_qdensity, video_statistics.Lossrate, videoq.PacketHistory.LostCount, video_statistics.Skiprate, videoq.PacketHistory.SkipCount, est_psnr_string);
|
1641 |
sprintf(video_stats_text, "[VIDEO] qsize: %d qdensity: %d\%% - losses: %d/sec (%ld tot) - rate: %d kbits/sec (avg: %d) %s", (int)videoq.nb_packets, (int)video_qdensity, video_statistics.Lossrate, videoq.PacketHistory.LostCount, video_statistics.Bitrate, video_avg_bitrate, est_psnr_string); |
1642 |
} |
1643 |
else
|
1644 |
sprintf(video_stats_text, "waiting for incoming video packets...");
|
1645 |
|
1646 |
if(qoe && qoe_led) {
|
1647 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,(qoe>LED_THRS_YELLOW?LED_GREEN:((qoe<=LED_THRS_YELLOW && qoe>LED_THRS_RED)?LED_YELLOW:LED_RED))); |
1648 |
} else {
|
1649 |
ChunkerPlayerGUI_SetStatsText(audio_stats_text, video_stats_text,LED_NONE); |
1650 |
} |
1651 |
|
1652 |
last_stats_evaluation = now; |
1653 |
} |
1654 |
|
1655 |
if((((now.tv_sec*1000)+(now.tv_usec/1000)) - ((last_qoe_evaluation.tv_sec*1000)+(last_qoe_evaluation.tv_usec/1000))) > EVAL_QOE_INTERVAL) |
1656 |
{ |
1657 |
// ESTIMATE QoE
|
1658 |
//ChunkerPlayerStats_GetMeanVideoQuality(&(videoq.PacketHistory), &qoe);
|
1659 |
// ESTIMATE QoE using real-time computed cumulative average bitrate
|
1660 |
// (plus a diminshing contribution of the instantaneous bitrate, until the cumulative avg stabilizes)
|
1661 |
int input_bitrate = 0; |
1662 |
// stabilize after circa 30 seconds
|
1663 |
if(videoq.cumulative_samples < 30*(1000/GUI_PRINTSTATS_INTERVAL)) |
1664 |
input_bitrate = video_statistics.Bitrate; |
1665 |
else
|
1666 |
input_bitrate = video_avg_bitrate; |
1667 |
//double a = 1 / ((double)videoq.cumulative_samples);
|
1668 |
//double b = 1-a;
|
1669 |
//double input_bitrate = a*((double)video_statistics.Bitrate) + b*((double)video_avg_bitrate);
|
1670 |
ChunkerPlayerStats_GetMeanVideoQuality(&(videoq.PacketHistory), input_bitrate, &qoe); |
1671 |
#ifdef DEBUG_STATS
|
1672 |
printf("rate %d avg %d wghtd %d cum_samp %d PSNR %f\n", video_statistics.Bitrate, video_avg_bitrate, (int)input_bitrate, videoq.cumulative_samples, (float)qoe); |
1673 |
#endif
|
1674 |
last_qoe_evaluation = now; |
1675 |
} |
1676 |
} |
1677 |
return 0; |
1678 |
} |
1679 |
|
1680 |
void ChunkerPlayerCore_ChangeDelay(int ms) |
1681 |
{ |
1682 |
DeltaTime += ms; |
1683 |
} |