chunker-player / chunker_player / player_core.c @ a0e0d6ff
#include "player_defines.h"
#include "chunker_player.h"
#include "player_gui.h"
#include "player_core.h"
#include <assert.h>

void SaveFrame(AVFrame *pFrame, int width, int height);
int VideoCallback(void *valthread);
void AudioCallback(void *userdata, Uint8 *stream, int len);
void UpdateQueueStats(PacketQueue *q, int packet_index);
void UpdateLossTraces(int type, int first_lost, int n_lost);
int UpdateQualityEvaluation(double instant_lost_frames, double instant_skips, int do_update);
void PacketQueueCleanStats(PacketQueue *q);

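/*
 * Core of the chunker player: ChunkerPlayerCore_EnqueueBlocks() parses
 * incoming chunks into audio/video frames and feeds the two ordered packet
 * queues (audioq/videoq); the SDL audio callback drains the audio queue in
 * fixed-size buffers, while a dedicated thread (VideoCallback, started by
 * ChunkerPlayerCore_Play()) decodes and displays video frames. Both paths
 * keep per-queue loss/skip statistics that drive the on-screen channel
 * quality estimate.
 */
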
void PacketQueueInit(PacketQueue *q, short int Type)
{
#ifdef DEBUG_QUEUE
    printf("QUEUE: INIT BEGIN: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
    memset(q,0,sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    QueueFillingMode=1;
    q->queueType=Type;
    q->last_frame_extracted = -1;
    q->total_lost_frames = 0;
    q->instant_lost_frames = 0;
    q->total_skips = 0;
    q->last_skips = 0;
    q->instant_skips = 0;
    q->first_pkt= NULL;
    //q->last_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    q->density= 0.0;
    FirstTime = 1;
    FirstTimeAudio = 1;
    //initialize statistics
    PacketQueueCleanStats(q);
#ifdef DEBUG_QUEUE
    printf("QUEUE: INIT END: NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
}

void PacketQueueReset(PacketQueue *q)
{
    AVPacketList *tmp,*tmp1;
#ifdef DEBUG_QUEUE
    printf("QUEUE: RESET BEGIN: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
    SDL_LockMutex(q->mutex);

    tmp = q->first_pkt;
    while(tmp) {
        tmp1 = tmp;
        tmp = tmp->next;
        av_free_packet(&(tmp1->pkt));
        av_free(tmp1);
#ifdef DEBUG_QUEUE
        printf("F ");
#endif
        q->total_lost_frames++;
    }
#ifdef DEBUG_QUEUE
    printf("\n");
#endif

    QueueFillingMode=1;
    q->last_frame_extracted = -1;
    // on queue reset do not reset the loss count
    // (the loss count is reset on queue init, i.e. on channel switch)
    // q->total_lost_frames = 0;
    q->density=0.0;
    q->first_pkt= NULL;
    //q->last_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    FirstTime = 1;
    FirstTimeAudio = 1;
    //clean up statistics
    PacketQueueCleanStats(q);
#ifdef DEBUG_QUEUE
    printf("QUEUE: RESET END: NPackets=%d Type=%s LastExtr=%d\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif
    SDL_UnlockMutex(q->mutex);
}

void PacketQueueCleanStats(PacketQueue *q) {
    int i=0;
    for(i=0; i<LOSS_HISTORY_MAX_SIZE; i++) {
        q->skip_history[i] = -1;
        q->loss_history[i] = -1;
    }
    q->loss_history_index = 0;
    q->skip_history_index = 0;
    q->instant_skips = 0.0;
    q->instant_lost_frames = 0.0;
    q->instant_window_size = 50; //averaging window size, self-correcting based on window_seconds
    q->instant_window_size_target = 0;
    q->instant_window_seconds = 1; //we want to compute number of events in a 1sec wide window
    q->last_window_size_update = 0;
    q->last_stats_display = 0;
    sprintf(q->stats_message, "%s", "\n");
}

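/*
 * Packets are kept sorted by pkt->stream_index, which this player uses as a
 * frame sequence number (see ChunkerPlayerCore_EnqueueBlocks): the put below
 * walks the list and inserts the new packet in order, drops it if a packet
 * with the same index is already queued or if the index is not beyond the
 * last frame already extracted, and resets the whole queue if it has grown
 * past queue_filling_threshold*QUEUE_MAX_GROW_FACTOR packets.
 */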
int ChunkerPlayerCore_PacketQueuePut(PacketQueue *q, AVPacket *pkt)
{
    short int skip = 0;
    AVPacketList *pkt1, *tmp, *prevtmp;

    if(q->nb_packets > queue_filling_threshold*QUEUE_MAX_GROW_FACTOR) {
#ifdef DEBUG_QUEUE
        printf("QUEUE: PUT i have TOO MANY packets %d Type=%s, RESETTING\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
        PacketQueueReset(q);
    }

    //make a copy of the incoming packet
    if(av_dup_packet(pkt) < 0) {
#ifdef DEBUG_QUEUE
        printf("QUEUE: PUT in Queue cannot duplicate in packet : NPackets=%d Type=%s\n",q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
        return -1;
    }
    pkt1 = av_malloc(sizeof(AVPacketList));

    if(!pkt1) {
        av_free_packet(pkt);
        return -1;
    }
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    // INSERTION SORT ALGORITHM
    // before inserting pkt, check if pkt.stream_index is <= current_extracted_frame.
    if(pkt->stream_index > q->last_frame_extracted) {
        // we scan starting from first_pkt; otherwise we would need another struct like AVPacketList with both next and prev pointers
        //if (!q->last_pkt)
        if(!q->first_pkt) {
            q->first_pkt = pkt1;
            q->last_pkt = pkt1;
        }
        else if(pkt->stream_index < q->first_pkt->pkt.stream_index) {
            //the packet that has arrived is earlier than the first we got some time ago!
            //we need to put it at the head of the queue
            pkt1->next = q->first_pkt;
            q->first_pkt = pkt1;
        }
        else {
            tmp = q->first_pkt;
            while(tmp->pkt.stream_index < pkt->stream_index) {
                prevtmp = tmp;
                tmp = tmp->next;

                if(!tmp) {
                    break;
                }
            }
            if(tmp && tmp->pkt.stream_index == pkt->stream_index) {
                //we already have a frame with that index
                skip = 1;
#ifdef DEBUG_QUEUE
                printf("QUEUE: PUT: we already have frame with index %d, skipping\n", pkt->stream_index);
#endif
            }
            else {
                prevtmp->next = pkt1;
                pkt1->next = tmp;
                if(pkt1->next == NULL)
                    q->last_pkt = pkt1;
            }
            //q->last_pkt->next = pkt1; // It was uncommented when this was not an insertion sort
        }
        if(skip == 0) {
            //q->last_pkt = pkt1;
            q->nb_packets++;
            q->size += pkt1->pkt.size;
            if(q->nb_packets>=queue_filling_threshold && QueueFillingMode) // && q->queueType==AUDIO)
            {
                QueueFillingMode=0;
#ifdef DEBUG_QUEUE
                printf("QUEUE: PUT: FillingMode set to zero\n");
#endif
            }
        }
    }
    else {
        av_free_packet(&pkt1->pkt);
        av_free(pkt1);
#ifdef DEBUG_QUEUE
        printf("QUEUE: PUT: NOT inserting because index %d <= last extracted %d\n", pkt->stream_index, q->last_frame_extracted);
#endif
    }

    // minus one means no lost-frames estimation, which is useless during QueuePut operations
    UpdateQueueStats(q, -1);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

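/*
 * Codec/audio-device setup. After SDL_OpenAudio() fills AudioSpecification,
 * two derived globals drive the audio dequeuing logic:
 *   dimAudioQ   = AudioSpecification.size, the number of bytes the SDL audio
 *                 callback asks for at each call;
 *   deltaAudioQ = samples*1000/freq, the duration in milliseconds covered by
 *                 one callback buffer. For example, with 1024 samples per
 *                 buffer at 44100 Hz (the real values depend on
 *                 SDL_AUDIO_BUFFER_SIZE and the stream's sample rate) this is
 *                 1024*1000/44100, about 23.2 ms.
 */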
int ChunkerPlayerCore_InitCodecs(int width, int height, int sample_rate, short int audio_channels)
{
    // some initializations
    QueueStopped = 0;
    AudioQueueOffset=0;
    AVPlaying = 0;
    GotSigInt = 0;
    FirstTimeAudio=1;
    FirstTime = 1;
    deltaAudioQError=0;
    InitRect = NULL;
    img_convert_ctx = NULL;

    SDL_AudioSpec wanted_spec;
    AVCodec *aCodec;

    memset(&VideoCallbackThreadParams, 0, sizeof(ThreadVal));

    VideoCallbackThreadParams.width = width;
    VideoCallbackThreadParams.height = height;

    // Register all formats and codecs
    av_register_all();

    aCodecCtx = avcodec_alloc_context();
    //aCodecCtx->bit_rate = 64000;
    aCodecCtx->sample_rate = sample_rate;
    aCodecCtx->channels = audio_channels;
#ifdef MP3_AUDIO_ENCODER
    aCodec = avcodec_find_decoder(CODEC_ID_MP3); // codec audio
#else
    aCodec = avcodec_find_decoder(CODEC_ID_MP2);
#endif
    printf("MP2 codec id %d MP3 codec id %d\n",CODEC_ID_MP2,CODEC_ID_MP3);
    if(!aCodec) {
        printf("Codec not found!\n");
        return -1;
    }
    if(avcodec_open(aCodecCtx, aCodec)<0) {
        fprintf(stderr, "could not open codec\n");
        return -1; // Could not open codec
    }
    printf("using audio Codecid: %d ",aCodecCtx->codec_id);
    printf("samplerate: %d ",aCodecCtx->sample_rate);
    printf("channels: %d\n",aCodecCtx->channels);
    CurrentAudioFreq = wanted_spec.freq = aCodecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = aCodecCtx->channels;
    wanted_spec.silence = 0;
    CurrentAudioSamples = wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = AudioCallback;
    wanted_spec.userdata = aCodecCtx;
    if(SDL_OpenAudio(&wanted_spec,&AudioSpecification)<0)
    {
        fprintf(stderr,"SDL_OpenAudio: %s\n", SDL_GetError());
        return -1;
    }
    dimAudioQ = AudioSpecification.size;
    deltaAudioQ = (float)((float)AudioSpecification.samples)*1000/AudioSpecification.freq;

#ifdef DEBUG_AUDIO
    printf("freq:%d\n",AudioSpecification.freq);
    printf("format:%d\n",AudioSpecification.format);
    printf("channels:%d\n",AudioSpecification.channels);
    printf("silence:%d\n",AudioSpecification.silence);
    printf("samples:%d\n",AudioSpecification.samples);
    printf("size:%d\n",AudioSpecification.size);
    printf("deltaAudioQ: %f\n",deltaAudioQ);
#endif

    outbuf_audio = malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);

    //initialize the audio and the video queues
    PacketQueueInit(&audioq, AUDIO);
    PacketQueueInit(&videoq, VIDEO);

    // Init audio and video buffers
    av_init_packet(&AudioPkt);
    av_init_packet(&VideoPkt);
    //printf("AVCODEC_MAX_AUDIO_FRAME_SIZE=%d\n", AVCODEC_MAX_AUDIO_FRAME_SIZE);
    AudioPkt.data=(uint8_t *)malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
    if(!AudioPkt.data) return 1;
    VideoPkt.data=(uint8_t *)malloc(width*height*3/2);
    if(!VideoPkt.data) return 1;

    InitRect = (SDL_Rect*) malloc(sizeof(SDL_Rect));
    if(!InitRect)
    {
        printf("Memory error!!!\n");
        return -1;
    }
    InitRect->x = OverlayRect.x;
    InitRect->y = OverlayRect.y;
    InitRect->w = OverlayRect.w;
    InitRect->h = OverlayRect.h;

    char audio_stats[255], video_stats[255];
    sprintf(audio_stats, "waiting for incoming audio packets...");
    sprintf(video_stats, "waiting for incoming video packets...");
    ChunkerPlayerGUI_SetStatsText(audio_stats, video_stats);

    return 0;
}

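/*
 * Audio packets are decoded in place while they sit in the queue: the
 * compressed payload of pkt is replaced with the raw PCM samples produced by
 * avcodec_decode_audio3(), and the queue size is adjusted accordingly.
 * pkt->convergence_duration is reused as a "decoded" marker: EnqueueBlocks
 * stores audio packets with the field set to 0 (not decoded yet), and this
 * function flags it to -1 before attempting the decode.
 */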
int DecodeEnqueuedAudio(AVPacket *pkt, PacketQueue *q)
{
    uint16_t *audio_bufQ = NULL;
    int16_t *dataQ = NULL;
    int data_sizeQ = AVCODEC_MAX_AUDIO_FRAME_SIZE;
    int lenQ;
    int ret = 0;

    //set the flag to decoded anyway
    pkt->convergence_duration = -1;

    audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
    if(audio_bufQ) {
#ifdef DEBUG_AUDIO_BUFFER
        printf("AUDIO_BUFFER: about to decode packet %d, size %d, data %d\n", pkt->stream_index, pkt->size, pkt->data);
#endif
        //decode the packet data
        lenQ = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_bufQ, &data_sizeQ, pkt);
        if(lenQ > 0) {
            dataQ = (int16_t *)av_malloc(data_sizeQ); //this will be freed later, at playback time
            if(dataQ) {
                memcpy(dataQ, audio_bufQ, data_sizeQ);
                if(pkt->data != NULL)
                {
                    //discard the old encoded bytes
                    av_free(pkt->data);
                }
                //subtract them from queue size
                q->size -= pkt->size;
                pkt->data = (int8_t *)dataQ;
                pkt->size = data_sizeQ;
                //add new size to queue size
                q->size += pkt->size;
                ret = 1;
            }
            else {
#ifdef DEBUG_AUDIO_BUFFER
                printf("AUDIO_BUFFER: cannot alloc space for decoded packet %d\n", pkt->stream_index);
#endif
            }
        }
        else {
#ifdef DEBUG_AUDIO_BUFFER
            printf("AUDIO_BUFFER: cannot decode packet %d\n", pkt->stream_index);
#endif
        }
        av_free(audio_bufQ);
    }
    else {
#ifdef DEBUG_AUDIO_BUFFER
        printf("AUDIO_BUFFER: cannot alloc decode buffer for packet %d\n", pkt->stream_index);
#endif
    }
    return ret; //1 if decoded successfully, 0 if problems occurred
}

/**
 * removes a packet from the list and returns the next
 */
AVPacketList *RemoveFromQueue(PacketQueue *q, AVPacketList *p)
{
    AVPacketList *retpk = p->next;
    q->nb_packets--;
    //adjust size here and not in the various cases of the dequeue
    q->size -= p->pkt.size;
    if(&p->pkt)
        av_free_packet(&p->pkt);
    if(p)
        av_free(p);
    return retpk;
}

AVPacketList *SeekAndDecodePacketStartingFrom(AVPacketList *p, PacketQueue *q)
{
    while(p) {
        //check if audio packet has been already decoded
        if(p->pkt.convergence_duration == 0) {
            //not decoded yet, try to decode it
            if( !DecodeEnqueuedAudio(&(p->pkt), q) ) {
                //it was not possible to decode this packet, return next one
                p = RemoveFromQueue(q, p);
            }
            else
                return p;
        }
        else
            return p;
    }
    return NULL;
}

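/*
 * Queue statistics. The density is the percentage of frames actually present
 * in the queue with respect to the frame-index span it covers:
 *   density = nb_packets / (last_index - first_index + 1) * 100
 * e.g. 40 queued packets whose indices span 100..199 give 40/100*100 = 40%.
 * When called from a QueueGet (packet_index != -1) the function also feeds
 * the per-second loss and skip counters of a sliding window whose size is
 * self-adjusted to approximate instant_window_seconds of real time.
 */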
void UpdateQueueStats(PacketQueue *q, int packet_index)
{
    //also used as a flag (0 means don't update the quality estimation average)
    int update_quality_avg = 0;
    int i;

    if(q == NULL)
        return;
    if(q->first_pkt == NULL)
        return;
    if(q->last_pkt == NULL)
        return;

    int now = time(NULL);
    if(!q->last_stats_display)
        q->last_stats_display = now;
    if(!q->last_window_size_update)
        q->last_window_size_update = now;

    //calculate the queue density both in case of QueuePut and QueueGet
    if(q->last_pkt->pkt.stream_index >= q->first_pkt->pkt.stream_index)
    {
        q->density = (double)q->nb_packets / (double)(q->last_pkt->pkt.stream_index - q->first_pkt->pkt.stream_index + 1) * 100.0; //plus 1 because if they are adjacent (difference 1) there really should be 2 packets in the queue
    }
#ifdef DEBUG_STATS
    if(q->queueType == AUDIO)
        printf("STATS: AUDIO QUEUE DENSITY percentage %f, last %d, first %d, pkts %d\n", q->density, q->last_pkt->pkt.stream_index, q->first_pkt->pkt.stream_index, q->nb_packets);
    if(q->queueType == VIDEO)
        printf("STATS: VIDEO QUEUE DENSITY percentage %f, last %d, first %d, pkts %d\n", q->density, q->last_pkt->pkt.stream_index, q->first_pkt->pkt.stream_index, q->nb_packets);
#endif

    //adjust the sliding window_size according to the elapsed time
    update_quality_avg = 0;
    //dynamically self-adjust the instant_window_size based on the time passing
    //to match it at our best, since we have no separate thread to count time
    if((now - q->last_window_size_update) >= q->instant_window_seconds) {
        int i = 0;
        //if window is enlarged, erase old samples
        for(i=q->instant_window_size; i<q->instant_window_size_target; i++) {
            q->skip_history[i] = -1;
            q->loss_history[i] = -1;
        }
#ifdef DEBUG_STATS
        printf("STATS: %s WINDOW_SIZE instant set from %d to %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->instant_window_size, q->instant_window_size_target);
#endif
        q->instant_window_size = q->instant_window_size_target;
        if(q->instant_window_size == 0) { //in case of anomaly reset to default
            q->instant_window_size = 50;
            printf("STATS: %s WINDOW_SIZE instant anomaly reset to default %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->instant_window_size);
        }
        if(q->instant_window_size > LOSS_HISTORY_MAX_SIZE) {
            printf("ERROR: %s instant_window size updated to %d, which is more than the max of %d. Exiting.\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->instant_window_size, LOSS_HISTORY_MAX_SIZE);
            exit(1);
        }
        q->instant_window_size_target = 0;
#ifdef DEBUG_STATS
        printf("STATS: %s WINDOW_SIZE instant moving time from %d to %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_window_size_update, now);
#endif
        //update time passing
        q->last_window_size_update = now;
        //signal to give the updated values to the UpdateQuality evaluation
        //since we don't update the long-term averaging every time, just do it every window_seconds
        update_quality_avg = 1;
    }

    //calculate lost frames and skipped frames during playing only if we are called from a QueueGet,
    //averaging them in a short-sized time window
    if(packet_index != -1)
    {
        int real_window_size = 0;
        int lost_frames = 0;
        double percentage = 0.0;

        //self adjust window size
        q->instant_window_size_target++;

        //compute lost frame statistics
        if(q->last_frame_extracted > 0 && packet_index > q->last_frame_extracted)
        {
            lost_frames = packet_index - q->last_frame_extracted - 1;
            q->total_lost_frames += lost_frames;
            percentage = (double)q->total_lost_frames / (double)q->last_frame_extracted * 100.0;

            //save a trace of lost frames to file
            //we have lost "lost_frames" frames starting from the last extracted (excluded of course)
            UpdateLossTraces(q->queueType, q->last_frame_extracted+1, lost_frames);

            //compute the frame loss rate inside the short-sized sliding window
            q->loss_history[q->loss_history_index] = lost_frames;
            q->loss_history_index = (q->loss_history_index+1)%q->instant_window_size;
            q->instant_lost_frames = 0.0;
            real_window_size = 0;
#ifdef DEBUG_STATS_DEEP
            printf("STATS: QUALITY: %s UPDATE LOSS:", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
            for(i=0; i<q->instant_window_size; i++) {
                //-1 means not initialized value or erased value due to window shrinking
                if(q->loss_history[i] != -1) {
                    real_window_size++;
                    q->instant_lost_frames += (double)q->loss_history[i];
#ifdef DEBUG_STATS_DEEP
                    printf(" %d", q->loss_history[i]);
#endif
                }
#ifdef DEBUG_STATS_DEEP
                else
                    printf(" *");
#endif
            }
#ifdef DEBUG_STATS_DEEP
            printf("\n");
#endif
            q->instant_lost_frames /= (double)q->instant_window_seconds; //express them in events/sec
#ifdef DEBUG_STATS
            if(q->queueType == AUDIO)
                printf("STATS: AUDIO FRAMES LOST: instant %f, total %d, total percentage %f\n", q->instant_lost_frames, q->total_lost_frames, percentage);
            else if(q->queueType == VIDEO)
                printf("STATS: VIDEO FRAMES LOST: instant %f, total %d, total percentage %f\n", q->instant_lost_frames, q->total_lost_frames, percentage);

            printf("STATS: QUALITY: %s UPDATE LOSS instant window %d, real window %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->instant_window_size, real_window_size);
#endif
        }

        //compute the skip events inside the short-sized sliding window
        int skips = q->total_skips - q->last_skips;
        q->last_skips = q->total_skips;
        q->skip_history[q->skip_history_index] = skips;
        q->skip_history_index = (q->skip_history_index+1)%q->instant_window_size;
        q->instant_skips = 0.0;
        real_window_size = 0;
#ifdef DEBUG_STATS_DEEP
        printf("STATS: QUALITY: %s UPDATE SKIP:", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif
        for(i=0; i<q->instant_window_size; i++) {
            //-1 means not initialized value or erased value due to window shrinking
            if(q->skip_history[i] != -1) {
                real_window_size++;
                q->instant_skips += (double)q->skip_history[i];
#ifdef DEBUG_STATS_DEEP
                printf(" %d", q->skip_history[i]);
#endif
            }
#ifdef DEBUG_STATS_DEEP
            else
                printf(" *");
#endif
        }
#ifdef DEBUG_STATS_DEEP
        printf("\n");
#endif
        q->instant_skips /= (double)q->instant_window_seconds; //express them in events/sec
#ifdef DEBUG_STATS
        if(q->queueType == AUDIO)
            printf("STATS: AUDIO SKIPS: instant %f, total %d\n", q->instant_skips, q->total_skips);
        else if(q->queueType == VIDEO)
            printf("STATS: VIDEO SKIPS: instant %f, total %d\n", q->instant_skips, q->total_skips);

        printf("STATS: QUALITY: %s UPDATE SKIP instant window %d, real window %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->instant_window_size, real_window_size);
#endif
    }

    //continuous estimate of the channel quality,
    //but print it only if it has changed since last time
    if(UpdateQualityEvaluation(q->instant_lost_frames, q->instant_skips, update_quality_avg)) {
        //display channel quality
        char stats[255];
        sprintf(stats, "%s - %s", Channels[SelectedChannel].Title, Channels[SelectedChannel].quality);
        ChunkerPlayerGUI_SetChannelTitle(stats);
    }

    //if the statistics printout has changed since last time, display it
    {
        char stats[255];
        if(q->queueType == AUDIO)
        {
            sprintf(stats, "[AUDIO] %d%% qdensity - %d lost_frames/sec - %d lost_frames - %d skips/sec - %d skips", (int)q->density, (int)q->instant_lost_frames, q->total_lost_frames, (int)q->instant_skips, q->total_skips);
        }
        else if(q->queueType == VIDEO)
        {
            sprintf(stats, "[VIDEO] %d%% qdensity - %d lost_frames/sec - %d lost_frames - %d skips/sec - %d skips", (int)q->density, (int)q->instant_lost_frames, q->total_lost_frames, (int)q->instant_skips, q->total_skips);
        }
        if((strcmp(q->stats_message, stats) ) && ((now-q->last_stats_display) >= 1))
        {
            //the statistics printout has changed
            sprintf(q->stats_message, "%s", stats);
            if(q->queueType == AUDIO)
                ChunkerPlayerGUI_SetStatsText(stats, NULL);
            else if(q->queueType == VIDEO)
                ChunkerPlayerGUI_SetStatsText(NULL, stats);

            q->last_stats_display = now;
        }
    }
}

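/*
 * Channel quality scoring. The instant score is simply the sum of the
 * instantaneous skip and loss rates (events/sec); roughly once per second,
 * whenever the short-term window rolls over, it is also pushed into a
 * longer-term history whose mean gives the average score. An average above
 * 0.02 events/sec is reported as "POOR", otherwise "GOOD", and comparing the
 * instant score against the average yields the "GETTING BETTER" / "STABLE" /
 * "GETTING WORSE" trend.
 */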
//returns 1 if quality value has changed since last time
int UpdateQualityEvaluation(double instant_lost_frames, double instant_skips, int do_update)
{
    //as of now, we enter new samples in the long-term averaging once every second,
    //so a window of N samples corresponds to roughly N seconds of averaging
    //(e.g. 120 samples would be 2 minutes)
    static int avg_window_size = 10; //averaging window size, self-correcting based on window_seconds
    static int avg_window_size_target = 0;
    static int avg_window_seconds = 10; //we average the number of events over a window this many seconds wide

    static int last_window_size_update = 0;

    char base_quality[255];
    char quality[255];
    int i;
    int now = time(NULL);
    if(!last_window_size_update)
        last_window_size_update = now;
    int runningTime = now - Channels[SelectedChannel].startTime;

    if(runningTime <= 0) {
        runningTime = 1;
#ifdef DEBUG_STATS
        printf("STATS: QUALITY warning channel runningTime %d. Set to one!\n", runningTime);
#endif
    }

    //update continuously the quality score
    Channels[SelectedChannel].instant_score = instant_skips + instant_lost_frames;

    if(do_update) {
#ifdef DEBUG_STATS
        printf("STATS: QUALITY: UPDATE SCORE lost frames %f, skips %f\n", instant_lost_frames, instant_skips);
#endif
        //every once in a while also enter samples in the long-term averaging window
        Channels[SelectedChannel].score_history[Channels[SelectedChannel].history_index] = Channels[SelectedChannel].instant_score;
        Channels[SelectedChannel].history_index = (Channels[SelectedChannel].history_index+1)%avg_window_size;

        Channels[SelectedChannel].average_score = 0.0;
        int real_window_size = 0;
        for(i=0; i<avg_window_size; i++) {
            //-1 means not initialized value or erased value due to window shrinking
            if(Channels[SelectedChannel].score_history[i] != -1) {
                real_window_size++;
                Channels[SelectedChannel].average_score += Channels[SelectedChannel].score_history[i];
            }
        }
        Channels[SelectedChannel].average_score /= (double)real_window_size; //average in the window
        Channels[SelectedChannel].average_score /= (double)avg_window_seconds; //express it in events/sec
#ifdef DEBUG_STATS
        printf("STATS: QUALITY: UPDATE SCORE avg window %d, real window %d\n", avg_window_size, real_window_size);
#endif
        //whenever we enter a sample, enlarge the self-adjusting window target (it will be checked later on)
        avg_window_size_target++;
    }
#ifdef DEBUG_STATS
    printf("STATS: QUALITY: instant skips %f, instant loss %f\n", instant_skips, instant_lost_frames);
    printf("STATS: QUALITY: instant score %f, avg score %f\n", Channels[SelectedChannel].instant_score, Channels[SelectedChannel].average_score);
#endif

    if(Channels[SelectedChannel].average_score > 0.02) {
        sprintf(base_quality, "POOR");
        if(Channels[SelectedChannel].instant_score < Channels[SelectedChannel].average_score) {
            sprintf(quality, "%s, GETTING BETTER", base_quality);
        }
        else if(Channels[SelectedChannel].instant_score == Channels[SelectedChannel].average_score) {
            sprintf(quality, "%s, STABLE", base_quality);
        }
        else {
            sprintf(quality, "%s, GETTING WORSE", base_quality);
        }
    }
    else {
        sprintf(base_quality, "GOOD");
        if(Channels[SelectedChannel].instant_score < Channels[SelectedChannel].average_score) {
            sprintf(quality, "%s, GETTING BETTER", base_quality);
        }
        else if(Channels[SelectedChannel].instant_score == Channels[SelectedChannel].average_score) {
            sprintf(quality, "%s, STABLE", base_quality);
        }
        else {
            sprintf(quality, "%s, GETTING WORSE", base_quality);
        }
    }

#ifdef DEBUG_STATS
    printf("STATS: QUALITY %s\n", quality);
#endif

    //dynamically self-adjust the avg_window_size based on the time passing
    //to match it at our best, since we have no separate thread to count time
    if((now - last_window_size_update) >= avg_window_seconds) {
        int i = 0;
        //if window is enlarged, erase old samples
        for(i=avg_window_size; i<avg_window_size_target; i++) {
            Channels[SelectedChannel].score_history[i] = -1;
        }
        avg_window_size = avg_window_size_target;
#ifdef DEBUG_STATS
        printf("STATS: WINDOW_SIZE avg set to %d\n", avg_window_size);
#endif
        if(avg_window_size > CHANNEL_SCORE_HISTORY_SIZE) {
            printf("ERROR: avg_window size updated to %d, which is more than the max of %d. Exiting.\n", avg_window_size, CHANNEL_SCORE_HISTORY_SIZE);
            exit(1);
        }
        avg_window_size_target = 0;
        //update time passing
        last_window_size_update = now;
    }

    if( strcmp(Channels[SelectedChannel].quality, quality) ) {
        //quality estimate has changed
        sprintf(Channels[SelectedChannel].quality, "%s", quality);
        return 1;
    }
    else {
        return 0;
    }
}

void UpdateLossTraces(int type, int first_lost, int n_lost)
{
    FILE *lossFile;
    int i;

    // Open loss traces file
    char filename[255];
    if(type == AUDIO)
        sprintf(filename, "audio_%s", LossTracesFilename);
    else
        sprintf(filename, "video_%s", LossTracesFilename);

    lossFile=fopen(filename, "a");
    if(lossFile==NULL) {
        printf("STATS: UNABLE TO OPEN Loss FILE: %s\n", filename);
        return;
    }

    for(i=0; i<n_lost; i++) {
        fprintf(lossFile, "%d\n", first_lost+i);
    }

    fclose(lossFile);
}

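/*
 * Dequeue: for video (av==0) the first packet is returned whole; for audio
 * (av==1) exactly dimAudioQ bytes are returned per call, possibly spliced
 * from two consecutive decoded packets. Since pts/dts are integer fields
 * while deltaAudioQ is fractional (milliseconds), the fractional remainder
 * of every timestamp advance is accumulated in deltaAudioQError and
 * re-applied on the next advance: e.g. with dts=1000, deltaAudioQ=23.2 and
 * error=0 the new dts becomes 1023 and the 0.2 remainder is carried over.
 */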
int PacketQueueGet(PacketQueue *q, AVPacket *pkt, short int av) {
    //AVPacket tmp;
    AVPacketList *pkt1 = NULL;
    int ret=-1;
    int SizeToCopy=0;

    SDL_LockMutex(q->mutex);

#ifdef DEBUG_QUEUE
    printf("QUEUE: Get NPackets=%d Type=%s\n", q->nb_packets, (q->queueType==AUDIO) ? "AUDIO" : "VIDEO");
#endif

    if((q->queueType==AUDIO && QueueFillingMode) || QueueStopped)
    {
        SDL_UnlockMutex(q->mutex);
        return -1;
    }

    if(av==1) { //somebody requested an audio packet, q is the audio queue
        //try to dequeue the first packet of the audio queue
        pkt1 = SeekAndDecodePacketStartingFrom(q->first_pkt, q);
        if(pkt1) { //yes we have them!
            if(pkt1->pkt.size-AudioQueueOffset > dimAudioQ) {
                //one packet is enough to give us the number of bytes requested by the audio_callback
#ifdef DEBUG_QUEUE_DEEP
                printf(" AV=1 and Extract from the same packet\n");
#endif
                pkt->size = dimAudioQ;
                memcpy(pkt->data,pkt1->pkt.data+AudioQueueOffset,dimAudioQ);
                pkt->dts = pkt1->pkt.dts;
                pkt->pts = pkt1->pkt.pts;
                pkt->stream_index = pkt1->pkt.stream_index;//1;
                pkt->flags = 1;
                pkt->pos = -1;
                pkt->convergence_duration = -1;
#ifdef DEBUG_QUEUE_DEEP
                printf(" Adjust timestamps Old = %lld New = %lld\n", pkt1->pkt.dts, (int64_t)(pkt1->pkt.dts + deltaAudioQ + deltaAudioQError));
#endif
                int64_t Olddts=pkt1->pkt.dts;
                pkt1->pkt.dts += deltaAudioQ + deltaAudioQError;
                pkt1->pkt.pts += deltaAudioQ + deltaAudioQError;
                deltaAudioQError=(float)Olddts + deltaAudioQ + deltaAudioQError - (float)pkt1->pkt.dts;
                AudioQueueOffset += dimAudioQ;
#ifdef DEBUG_QUEUE_DEEP
                printf(" deltaAudioQError = %f\n",deltaAudioQError);
#endif
                //update overall state of queue
                //size is diminished because we played some audio samples
                //but packet is not removed since a portion has still to be played
                //HINT ERRATA we had a size mismatch since size grows with the
                //number of compressed bytes, and diminishes here with the number
                //of raw uncompressed bytes, hence we update size during the
                //real removes and not here anymore
                //q->size -= dimAudioQ;
                UpdateQueueStats(q, pkt->stream_index);
                //update index of last frame extracted
                q->last_frame_extracted = pkt->stream_index;
#ifdef DEBUG_AUDIO_BUFFER
                printf("1: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif
                ret = 1; //OK
            }
            else {
                //we need bytes from two consecutive packets to satisfy the audio_callback
#ifdef DEBUG_QUEUE_DEEP
                printf(" AV = 1 and Extract from 2 packets\n");
#endif
                //check for a valid next packet since we will finish the current packet
                //and also take some bytes from the next one
                pkt1->next = SeekAndDecodePacketStartingFrom(pkt1->next, q);
                if(pkt1->next) {
#ifdef DEBUG_QUEUE_DEEP
                    printf(" we have a next...\n");
#endif
                    pkt->size = dimAudioQ;
                    pkt->dts = pkt1->pkt.dts;
                    pkt->pts = pkt1->pkt.pts;
                    pkt->stream_index = pkt1->pkt.stream_index;//1;
                    pkt->flags = 1;
                    pkt->pos = -1;
                    pkt->convergence_duration = -1;
                    {
                        SizeToCopy=pkt1->pkt.size-AudioQueueOffset;
#ifdef DEBUG_QUEUE_DEEP
                        printf(" SizeToCopy=%d\n",SizeToCopy);
#endif
                        memcpy(pkt->data, pkt1->pkt.data+AudioQueueOffset, SizeToCopy);
                        memcpy(pkt->data+SizeToCopy, pkt1->next->pkt.data, (dimAudioQ-SizeToCopy)*sizeof(uint8_t));
                    }
#ifdef DEBUG_AUDIO_BUFFER
                    printf("2: idx %d \taqo %d \tstc %d \taqe %f \tpsz %d\n", pkt1->pkt.stream_index, AudioQueueOffset, SizeToCopy, deltaAudioQError, pkt1->pkt.size);
#endif
                }
#ifdef DEBUG_AUDIO_BUFFER
                else {
                    printf("2: NONEXT\n");
                }
#endif
                //HINT SEE before q->size -= SizeToCopy;
                q->first_pkt = RemoveFromQueue(q, pkt1);

                // Adjust timestamps
                pkt1 = q->first_pkt;
                if(pkt1) {
                    int Offset=(dimAudioQ-SizeToCopy)*1000/(AudioSpecification.freq*2*AudioSpecification.channels);
                    int64_t LastDts=pkt1->pkt.dts;
                    pkt1->pkt.dts += Offset + deltaAudioQError;
                    pkt1->pkt.pts += Offset + deltaAudioQError;
                    deltaAudioQError = (float)LastDts + (float)Offset + deltaAudioQError - (float)pkt1->pkt.dts;
#ifdef DEBUG_QUEUE_DEEP
                    printf(" Adjust timestamps Old = %lld New = %lld\n", LastDts, pkt1->pkt.dts);
#endif
                    AudioQueueOffset = dimAudioQ - SizeToCopy;
                    //SEE BEFORE HINT q->size -= AudioQueueOffset;
                    ret = 1;
                    UpdateQueueStats(q, pkt->stream_index);
                }
                else {
                    AudioQueueOffset=0;
                }
#ifdef DEBUG_QUEUE_DEEP
                printf(" deltaAudioQError = %f\n",deltaAudioQError);
#endif
                //update index of last frame extracted
                q->last_frame_extracted = pkt->stream_index;
            }
        }
    }
    else { //somebody requested a video packet, q is the video queue
        pkt1 = q->first_pkt;
        if(pkt1) {
#ifdef DEBUG_QUEUE_DEEP
            printf(" AV not 1\n");
#endif
            pkt->size = pkt1->pkt.size;
            pkt->dts = pkt1->pkt.dts;
            pkt->pts = pkt1->pkt.pts;
            pkt->stream_index = pkt1->pkt.stream_index;
            pkt->flags = pkt1->pkt.flags;
            pkt->pos = pkt1->pkt.pos;
            pkt->convergence_duration = pkt1->pkt.convergence_duration;
            //*pkt = pkt1->pkt;

            if((pkt->data != NULL) && (pkt1->pkt.data != NULL))
                memcpy(pkt->data, pkt1->pkt.data, pkt1->pkt.size);

            //HINT SEE BEFORE q->size -= pkt1->pkt.size;
            q->first_pkt = RemoveFromQueue(q, pkt1);

            ret = 1;
            UpdateQueueStats(q, pkt->stream_index);
            //update index of last frame extracted
            q->last_frame_extracted = pkt->stream_index;
        }
#ifdef DEBUG_QUEUE
        else {
            printf(" VIDEO pk1 NULL!!!!\n");
        }
#endif
    }

    if(q->nb_packets==0 && q->queueType==AUDIO) {
        QueueFillingMode=1;
#ifdef DEBUG_QUEUE
        printf("QUEUE: Get FillingMode ON\n");
#endif
    }
#ifdef DEBUG_QUEUE
    printf("QUEUE: Get Last %s Frame Extracted = %d\n", (q->queueType==AUDIO) ? "AUDIO" : "VIDEO", q->last_frame_extracted);
#endif

    SDL_UnlockMutex(q->mutex);
    return ret;
}

int AudioDecodeFrame(uint8_t *audio_buf, int buf_size) {
    //struct timeval now;
    int audio_pkt_size = 0;
    long long Now;
    short int DecodeAudio=0, SkipAudio=0;
    //int len1, data_size;

    //gettimeofday(&now,NULL);
    //Now = (now.tv_sec)*1000+now.tv_usec/1000;
    Now=(long long)SDL_GetTicks();

    if(QueueFillingMode || QueueStopped)
    {
        //SDL_LockMutex(timing_mutex);
        FirstTimeAudio=1;
        FirstTime = 1;
        //SDL_UnlockMutex(timing_mutex);
        return -1;
    }

    if((FirstTime==1 || FirstTimeAudio==1) && audioq.size>0) {
        if(audioq.first_pkt->pkt.pts>0)
        {
            //SDL_LockMutex(timing_mutex);
            DeltaTime=Now-(long long)(audioq.first_pkt->pkt.pts);
            FirstTimeAudio = 0;
            FirstTime = 0;
            //SDL_UnlockMutex(timing_mutex);
#ifdef DEBUG_AUDIO
            printf("AUDIO: audio_decode_frame - DeltaTimeAudio=%lld\n",DeltaTime);
#endif
        }
    }

#ifdef DEBUG_AUDIO
    if(audioq.first_pkt)
    {
        printf("AUDIO: audio_decode_frame - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)audioq.first_pkt->pkt.pts,(long long)audioq.first_pkt->pkt.pts+DeltaTime);
        printf("AUDIO: QueueLen=%d ",(int)audioq.nb_packets);
        printf("AUDIO: QueueSize=%d\n",(int)audioq.size);
    }
    else
        printf("AUDIO: audio_decode_frame - Empty queue\n");
#endif

    if(audioq.nb_packets>0) {
        if((long long)audioq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE) {
            SkipAudio = 1;
            DecodeAudio = 0;
        }
        else if((long long)audioq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE &&
            (long long)audioq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) {
            SkipAudio = 0;
            DecodeAudio = 1;
        }
    }

    while(SkipAudio==1 && audioq.size>0) {
        audioq.total_skips++;
        SkipAudio = 0;
#ifdef DEBUG_AUDIO
        printf("AUDIO: skipaudio: queue size=%d\n",audioq.size);
#endif
        if(PacketQueueGet(&audioq,&AudioPkt,1) < 0) {
            return -1;
        }
        if(audioq.first_pkt)
        {
            if((long long)audioq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE) {
                SkipAudio = 1;
                DecodeAudio = 0;
            }
            else if((long long)audioq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE &&
                (long long)audioq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) {
                SkipAudio = 0;
                DecodeAudio = 1;
            }
        }
    }
    if(DecodeAudio==1) {
        if(PacketQueueGet(&audioq,&AudioPkt,1) < 0) {
            return -1;
        }
        memcpy(audio_buf,AudioPkt.data,AudioPkt.size);
        audio_pkt_size = AudioPkt.size;
#ifdef DEBUG_AUDIO
        printf("AUDIO: Decode audio\n");
#endif
    }

    return audio_pkt_size;
}

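/*
 * Video thread. A queued frame is displayed when its pts, shifted by the
 * DeltaTime offset measured on the first packet, falls within
 * +/-MAX_TOLLERANCE of the current SDL tick; frames already older than that
 * are decoded and skipped without being shown. Note the plane swap when
 * filling the overlay: SDL YV12 overlays store the planes as Y,V,U while
 * libavcodec's YUV420P frames are Y,U,V, so data/linesize index 1 maps to
 * pixels/pitches index 2 and vice versa.
 */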
int VideoCallback(void *valthread)
{
    //AVPacket pktvideo;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    int frameFinished;
    AVPicture pict;
    long long Now;
    short int SkipVideo, DecodeVideo;

    //double frame_rate = 0.0,time_between_frames=0.0;
    //struct timeval now;

    //int wait_for_sync = 1;
    ThreadVal *tval;
    tval = (ThreadVal *)valthread;

    //frame_rate = tval->framerate;
    //time_between_frames = 1.e6 / frame_rate;
    //gettimeofday(&time_now,0);

    //frecon = fopen("recondechunk.mpg","wb");

    pCodecCtx=avcodec_alloc_context();
    pCodecCtx->codec_type = CODEC_TYPE_VIDEO;
#ifdef H264_VIDEO_ENCODER
    pCodecCtx->codec_id = CODEC_ID_H264;
    pCodecCtx->me_range = 16;
    pCodecCtx->max_qdiff = 4;
    pCodecCtx->qmin = 1;
    pCodecCtx->qmax = 30;
    pCodecCtx->qcompress = 0.6;
#else
    pCodecCtx->codec_id = CODEC_ID_MPEG4;
#endif
    //pCodecCtx->bit_rate = 400000;
    // resolution must be a multiple of two
    pCodecCtx->width = tval->width;//176;//352;
    pCodecCtx->height = tval->height;//144;//288;

    // frames per second
    //pCodecCtx->time_base = (AVRational){1,25};
    //pCodecCtx->gop_size = 10; // emit one intra frame every ten frames
    //pCodecCtx->max_b_frames=1;
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);

    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    if(avcodec_open(pCodecCtx, pCodec) < 0) {
        fprintf(stderr, "could not open codec\n");
        return -1; // Could not open codec
    }
    pFrame=avcodec_alloc_frame();
    if(pFrame==NULL) {
        printf("Memory error!!!\n");
        return -1;
    }

#ifdef DEBUG_VIDEO
    printf("VIDEO: video_callback entering main cycle\n");
#endif
    while(AVPlaying && !quit) {
        if(QueueFillingMode || QueueStopped)
        {
            //SDL_LockMutex(timing_mutex);
            FirstTime = 1;
            //SDL_UnlockMutex(timing_mutex);
            usleep(5000);
            continue;
        }

        DecodeVideo = 0;
        SkipVideo = 0;
        Now=(long long)SDL_GetTicks();
        if(FirstTime==1 && videoq.size>0) {
            if(videoq.first_pkt->pkt.pts>0)
            {
                //SDL_LockMutex(timing_mutex);
                DeltaTime=Now-(long long)videoq.first_pkt->pkt.pts;
                FirstTime = 0;
                //SDL_UnlockMutex(timing_mutex);
            }
#ifdef DEBUG_VIDEO
            printf("VIDEO: VideoCallback - DeltaTimeAudio=%lld\n",DeltaTime);
#endif
        }

#ifdef DEBUG_VIDEO
        if(videoq.first_pkt)
        {
            printf("VIDEO: VideoCallback - Syncro params: Delta:%lld Now:%lld pts=%lld pts+Delta=%lld ",(long long)DeltaTime,Now,(long long)videoq.first_pkt->pkt.pts,(long long)videoq.first_pkt->pkt.pts+DeltaTime);
            printf("VIDEO: Index=%d ", (int)videoq.first_pkt->pkt.stream_index);
            printf("VIDEO: QueueLen=%d ", (int)videoq.nb_packets);
            printf("VIDEO: QueueSize=%d\n", (int)videoq.size);
        }
        else
            printf("VIDEO: VideoCallback - Empty queue\n");
#endif

        if(videoq.nb_packets>0) {
            if(((long long)videoq.first_pkt->pkt.pts+DeltaTime)<Now-(long long)MAX_TOLLERANCE) {
                SkipVideo = 1;
                DecodeVideo = 0;
            }
            else
            if(((long long)videoq.first_pkt->pkt.pts+DeltaTime)>=Now-(long long)MAX_TOLLERANCE &&
                ((long long)videoq.first_pkt->pkt.pts+DeltaTime)<=Now+(long long)MAX_TOLLERANCE) {
                SkipVideo = 0;
                DecodeVideo = 1;
            }
        }
#ifdef DEBUG_VIDEO
        printf("VIDEO: skipvideo:%d decodevideo:%d\n",SkipVideo,DecodeVideo);
#endif

        while(SkipVideo==1 && videoq.size>0) {
            videoq.total_skips++;
            SkipVideo = 0;
#ifdef DEBUG_VIDEO
            printf("VIDEO: Skip Video\n");
#endif
            if(PacketQueueGet(&videoq,&VideoPkt,0) < 0) {
                break;
            }
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt);
            if(videoq.first_pkt)
            {
                if((long long)videoq.first_pkt->pkt.pts+DeltaTime<Now-(long long)MAX_TOLLERANCE) {
                    SkipVideo = 1;
                    DecodeVideo = 0;
                }
                else if((long long)videoq.first_pkt->pkt.pts+DeltaTime>=Now-(long long)MAX_TOLLERANCE &&
                    (long long)videoq.first_pkt->pkt.pts+DeltaTime<=Now+(long long)MAX_TOLLERANCE) {
                    SkipVideo = 0;
                    DecodeVideo = 1;
                }
            }
        }

        if(DecodeVideo==1) {
            if(PacketQueueGet(&videoq,&VideoPkt,0) > 0) {

#ifdef DEBUG_VIDEO
                printf("VIDEO: Decode video FrameTime=%lld Now=%lld\n",(long long)VideoPkt.pts+DeltaTime,Now);
#endif

                avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &VideoPkt);

                if(frameFinished) { // this should be true every time, otherwise it is an error
#ifdef DEBUG_VIDEO
                    printf("VIDEO: FrameFinished\n");
#endif
                    if(SaveYUV)
                        SaveFrame(pFrame, pCodecCtx->width, pCodecCtx->height);
                    //fwrite(pktvideo.data, 1, pktvideo.size, frecon);

                    if(SilentMode)
                        continue;

                    // Lock SDL_yuv_overlay
                    if(SDL_MUSTLOCK(MainScreen)) {
                        if(SDL_LockSurface(MainScreen) < 0) {
                            continue;
                        }
                    }

                    if(SDL_LockYUVOverlay(YUVOverlay) < 0) {
                        if(SDL_MUSTLOCK(MainScreen)) {
                            SDL_UnlockSurface(MainScreen);
                        }
                        continue;
                    }

                    pict.data[0] = YUVOverlay->pixels[0];
                    pict.data[1] = YUVOverlay->pixels[2];
                    pict.data[2] = YUVOverlay->pixels[1];

                    pict.linesize[0] = YUVOverlay->pitches[0];
                    pict.linesize[1] = YUVOverlay->pitches[2];
                    pict.linesize[2] = YUVOverlay->pitches[1];

                    if(img_convert_ctx == NULL) {
                        img_convert_ctx = sws_getContext(tval->width, tval->height, PIX_FMT_YUV420P, InitRect->w, InitRect->h, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                        if(img_convert_ctx == NULL) {
                            fprintf(stderr, "Cannot initialize the conversion context!\n");
                            exit(1);
                        }
                    }
                    // let's draw the data (*yuv[3]) on a SDL screen (*screen)
                    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, tval->height, pict.data, pict.linesize);
                    SDL_UnlockYUVOverlay(YUVOverlay);
                    // Show, baby, show!
                    SDL_LockMutex(OverlayMutex);
                    SDL_DisplayYUVOverlay(YUVOverlay, &OverlayRect);
                    SDL_UnlockMutex(OverlayMutex);

                    //redisplay logo
                    /**SDL_BlitSurface(image, NULL, MainScreen, &dest);*/
                    /* Update the screen area just changed */
                    /**SDL_UpdateRects(MainScreen, 1, &dest);*/

                    if(SDL_MUSTLOCK(MainScreen)) {
                        SDL_UnlockSurface(MainScreen);
                    }
                } //if FrameFinished
            } // if packet_queue_get
        } //if DecodeVideo=1

        usleep(5000);
    }

    av_free(pCodecCtx);
    av_free(pFrame);
    //fclose(frecon);
#ifdef DEBUG_VIDEO
    printf("VIDEO: video callback end\n");
#endif
    return 0;
}

void AudioCallback(void *userdata, Uint8 *stream, int len)
{
    //AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
    int audio_size;

    static uint8_t audio_buf[AVCODEC_MAX_AUDIO_FRAME_SIZE];

    audio_size = AudioDecodeFrame(audio_buf, sizeof(audio_buf));

    if(!SilentMode)
        if(audio_size != len) {
            memset(stream, 0, len);
        } else {
            memcpy(stream, (uint8_t *)audio_buf, len);
        }
}

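/*
 * SaveFrame appends one raw planar YUV 4:2:0 picture to YUVFileName:
 * width*height luma bytes followed by (width/2)*(height/2) bytes for each of
 * the U and V planes, written line by line so that the decoder's row padding
 * (linesize may be larger than the visible width) is stripped out.
 */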
void SaveFrame(AVFrame *pFrame, int width, int height)
{
    FILE *pFile;
    int y;

    // Open file
    pFile=fopen(YUVFileName, "ab");
    if(pFile==NULL)
        return;

    // Write header
    //fprintf(pFile, "P5\n%d %d\n255\n", width, height);

    // Write Y data
    for(y=0; y<height; y++)
        fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width, pFile);
    // Write U data
    for(y=0; y<height/2; y++)
        fwrite(pFrame->data[1]+y*pFrame->linesize[1], 1, width/2, pFile);
    // Write V data
    for(y=0; y<height/2; y++)
        fwrite(pFrame->data[2]+y*pFrame->linesize[2], 1, width/2, pFile);

    // Close file
    fclose(pFile);
}

int ChunkerPlayerCore_IsRunning()
{
    return AVPlaying;
}

void ChunkerPlayerCore_Play()
{
    if(AVPlaying) return;

    AVPlaying = 1;
    SDL_PauseAudio(0);
    video_thread = SDL_CreateThread(VideoCallback, &VideoCallbackThreadParams);
}

void ChunkerPlayerCore_Stop()
{
    if(!AVPlaying) return;

    AVPlaying = 0;

    // Stop audio&video playback
    SDL_WaitThread(video_thread, NULL);
    SDL_PauseAudio(1);
    SDL_CloseAudio();

    if(YUVOverlay != NULL)
    {
        SDL_FreeYUVOverlay(YUVOverlay);
        YUVOverlay = NULL;
    }

    PacketQueueReset(&audioq);
    PacketQueueReset(&videoq);

    av_free(aCodecCtx);
    free(AudioPkt.data);
    free(VideoPkt.data);
    free(outbuf_audio);
    free(InitRect);

    /*
     * Sleep two buffers' worth of audio before closing, in order
     * to allow the playback to finish. This isn't always enough;
     * perhaps SDL needs a way to explicitly wait for device drain?
     */
    int delay = 2 * 1000 * CurrentAudioSamples / CurrentAudioFreq;
    printf("SDL_Delay(%d)\n", delay);
    SDL_Delay(delay);
}

int ChunkerPlayerCore_AudioEnded()
{
    return (audioq.nb_packets==0 && audioq.last_frame_extracted>0);
}

void ChunkerPlayerCore_ResetAVQueues()
{
#ifdef DEBUG_QUEUE
    printf("QUEUE: MAIN SHOULD RESET\n");
#endif
    PacketQueueReset(&audioq);
    PacketQueueReset(&videoq);
}

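/*
 * Chunk ingestion. The chunk payload is a sequence of frames, each preceded
 * by a 5*32bit header read with bit32_encoded_pull(): frame number,
 * timestamp seconds, timestamp microseconds, payload size and frame type
 * (type 5 is audio, lower values are video). For every frame an AVPacket is
 * built with pts/dts = tv_sec*1000 + tv_usec and stream_index carrying the
 * frame number, and it is copied into the proper queue by
 * ChunkerPlayerCore_PacketQueuePut().
 */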
int ChunkerPlayerCore_EnqueueBlocks(const uint8_t *block, const int block_size)
{
    Chunk *gchunk = NULL;
    int decoded_size = -1;
    uint8_t *tempdata, *buffer;
    int j;
    Frame *frame = NULL;
    AVPacket packet, packetaudio;

    uint16_t *audio_bufQ = NULL;

    //the frame.h header gets encoded into 5 slots of 32bits (3 ints plus 2 more for the timeval struct)
    static int sizeFrameHeader = 5*sizeof(int32_t);
    static int ExternalChunk_header_size = 5*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 2*CHUNK_TRANSCODING_INT_SIZE + 1*CHUNK_TRANSCODING_INT_SIZE*2;

    static int chunks_out_of_order = 0;
    static int last_chunk_id = -1;

    audio_bufQ = (uint16_t *)av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
    if(!audio_bufQ) {
        printf("Memory error in audio_bufQ!\n");
        return PLAYER_FAIL_RETURN;
    }

    gchunk = (Chunk *)malloc(sizeof(Chunk));
    if(!gchunk) {
        printf("Memory error in gchunk!\n");
        av_free(audio_bufQ);
        return PLAYER_FAIL_RETURN;
    }

    decoded_size = decodeChunk(gchunk, block, block_size);

    if(last_chunk_id == -1)
        last_chunk_id = gchunk->id;

    if(gchunk->id > (last_chunk_id+1)) {
        chunks_out_of_order += gchunk->id - last_chunk_id - 1;
    }
    last_chunk_id = gchunk->id;

#ifdef DEBUG_CHUNKER
    printf("CHUNKER: enqueueBlock: id %d decoded_size %d target size %d - out_of_order %d\n", gchunk->id, decoded_size, GRAPES_ENCODED_CHUNK_HEADER_SIZE + ExternalChunk_header_size + gchunk->size, chunks_out_of_order);
#endif
    if(decoded_size < 0) {
        //HINT here i should differentiate between the various return values of the decode
        //in order to free what has been allocated there
        printf("chunk probably corrupted!\n");
        av_free(audio_bufQ);
        free(gchunk);
        return PLAYER_FAIL_RETURN;
    }

    frame = (Frame *)malloc(sizeof(Frame));
    if(!frame) {
        printf("Memory error in Frame!\n");
        if(gchunk) {
            if(gchunk->attributes) {
                free(gchunk->attributes);
            }
            free(gchunk);
        }
        av_free(audio_bufQ);
        return PLAYER_FAIL_RETURN;
    }

    tempdata = gchunk->data; //let it point to the first frame of the payload
    j=gchunk->size;
    while(j>0 && !quit) {
        frame->number = bit32_encoded_pull(tempdata);
        tempdata += CHUNK_TRANSCODING_INT_SIZE;
        frame->timestamp.tv_sec = bit32_encoded_pull(tempdata);
        tempdata += CHUNK_TRANSCODING_INT_SIZE;
        frame->timestamp.tv_usec = bit32_encoded_pull(tempdata);
        tempdata += CHUNK_TRANSCODING_INT_SIZE;
        frame->size = bit32_encoded_pull(tempdata);
        tempdata += CHUNK_TRANSCODING_INT_SIZE;
        frame->type = bit32_encoded_pull(tempdata);
        tempdata += CHUNK_TRANSCODING_INT_SIZE;

        buffer = tempdata; // here the coded frame data begins
        tempdata += frame->size; //let it point to the next frame

        if(frame->type < 5) { // video frame
            av_init_packet(&packet);
            packet.data = buffer;//video_bufQ;
            packet.size = frame->size;
            packet.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec;
            packet.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec;
            packet.stream_index = frame->number; // use stream_index to carry the frame number
            //packet.duration = frame->timestamp.tv_sec;
            if(packet.size > 0)
                ChunkerPlayerCore_PacketQueuePut(&videoq, &packet); //the _put makes a copy of the packet

#ifdef DEBUG_SOURCE
            printf("SOURCE: Insert video in queue pts=%lld %d %d sindex:%d\n",packet.pts,(int)frame->timestamp.tv_sec,(int)frame->timestamp.tv_usec,packet.stream_index);
#endif
        }
        else if(frame->type == 5) { // audio frame
            av_init_packet(&packetaudio);
            packetaudio.data = buffer;
            packetaudio.size = frame->size;
            packetaudio.pts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec;
            packetaudio.dts = frame->timestamp.tv_sec*(unsigned long long)1000+frame->timestamp.tv_usec;
            //packetaudio.duration = frame->timestamp.tv_sec;
            packetaudio.stream_index = frame->number; // use stream_index to carry the frame number
            packetaudio.flags = 1;
            packetaudio.pos = -1;

            //instead of -1, in order to signal it is not decoded yet
            packetaudio.convergence_duration = 0;

            // insert the audio frame into the queue
            if(packetaudio.size > 0)
                ChunkerPlayerCore_PacketQueuePut(&audioq, &packetaudio);//makes a copy of the packet so it can be freed here

#ifdef DEBUG_SOURCE
            printf("SOURCE: Insert audio in queue pts=%lld sindex:%d\n", packetaudio.pts, packetaudio.stream_index);
#endif
        }
        else {
            printf("SOURCE: Unknown frame type %d. Size %d\n", frame->type, frame->size);
        }
        if(frame->size > 0)
            j = j - sizeFrameHeader - frame->size;
        else {
            printf("SOURCE: Corrupt frames (size %d) in chunk. Skipping it...\n", frame->size);
            j = -1;
        }
    }
    //chunk ingestion terminated!
    if(gchunk) {
        if(gchunk->attributes) {
            free(gchunk->attributes);
        }
        if(gchunk->data)
            free(gchunk->data);
        free(gchunk);
    }
    if(frame)
        free(frame);
    if(audio_bufQ)
        av_free(audio_bufQ);

    return PLAYER_OK_RETURN;
}

void ChunkerPlayerCore_SetupOverlay(int width, int height)
{
//    if(!MainScreen && !SilentMode)
//    {
//        printf("Cannot find main screen, exiting...\n");
//        exit(1);
//    }

    if(SilentMode)
        return;

    SDL_LockMutex(OverlayMutex);
    if(YUVOverlay != NULL)
    {
        SDL_FreeYUVOverlay(YUVOverlay);
        YUVOverlay = NULL;
    }

    // create video overlay for display of video frames
    // printf("SDL_CreateYUVOverlay(%d, %d, SDL_YV12_OVERLAY, MainScreen)\n", width, height);
    YUVOverlay = SDL_CreateYUVOverlay(width, height, SDL_YV12_OVERLAY, MainScreen);
    // YUVOverlay = SDL_CreateYUVOverlay(OverlayRect.w, OverlayRect.h, SDL_YV12_OVERLAY, MainScreen);
    if ( YUVOverlay == NULL )
    {
        fprintf(stderr,"SDL: Couldn't create SDL_yuv_overlay: %s", SDL_GetError());
        exit(1);
    }

    if ( YUVOverlay->hw_overlay )
        fprintf(stderr,"SDL: Using hardware overlay.\n");
    // OverlayRect.x = (screen_w - width) / 2;

    SDL_DisplayYUVOverlay(YUVOverlay, &OverlayRect);

    SDL_UnlockMutex(OverlayMutex);
}