Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 166621ab

History | View | Annotate | Download (93.2 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <math.h>
24
#include <limits.h>
25
#include "libavutil/avstring.h"
26
#include "libavutil/pixdesc.h"
27
#include "libavformat/avformat.h"
28
#include "libavdevice/avdevice.h"
29
#include "libswscale/swscale.h"
30
#include "libavcodec/audioconvert.h"
31
#include "libavcodec/colorspace.h"
32
#include "libavcodec/opt.h"
33
#include "libavcodec/avfft.h"
34

    
35
#if CONFIG_AVFILTER
36
# include "libavfilter/avfilter.h"
37
# include "libavfilter/avfiltergraph.h"
38
# include "libavfilter/graphparser.h"
39
#endif
40

    
41
#include "cmdutils.h"
42

    
43
#include <SDL.h>
44
#include <SDL_thread.h>
45

    
46
#ifdef __MINGW32__
47
#undef main /* We don't want SDL to override our main() */
48
#endif
49

    
50
#undef exit
51
#undef printf
52
#undef fprintf
53

    
54
const char program_name[] = "FFplay";
55
const int program_birth_year = 2003;
56

    
57
//#define DEBUG_SYNC
58

    
59
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61
#define MIN_FRAMES 5
62

    
63
/* SDL audio buffer size, in samples. Should be small to have precise
64
   A/V sync as SDL does not have hardware buffer fullness info. */
65
#define SDL_AUDIO_BUFFER_SIZE 1024
66

    
67
/* no AV sync correction is done if below the AV sync threshold */
68
#define AV_SYNC_THRESHOLD 0.01
69
/* no AV correction is done if too big error */
70
#define AV_NOSYNC_THRESHOLD 10.0
71

    
72
/* maximum audio speed change to get correct sync */
73
#define SAMPLE_CORRECTION_PERCENT_MAX 10
74

    
75
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
76
#define AUDIO_DIFF_AVG_NB   20
77

    
78
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
79
#define SAMPLE_ARRAY_SIZE (2*65536)
80

    
81
#if !CONFIG_AVFILTER
82
static int sws_flags = SWS_BICUBIC;
83
#endif
84

    
85
typedef struct PacketQueue {
86
    AVPacketList *first_pkt, *last_pkt;
87
    int nb_packets;
88
    int size;
89
    int abort_request;
90
    SDL_mutex *mutex;
91
    SDL_cond *cond;
92
} PacketQueue;
93

    
94
#define VIDEO_PICTURE_QUEUE_SIZE 1
95
#define SUBPICTURE_QUEUE_SIZE 4
96

    
97
typedef struct VideoPicture {
98
    double pts;                                  ///<presentation time stamp for this picture
99
    int64_t pos;                                 ///<byte position in file
100
    SDL_Overlay *bmp;
101
    int width, height; /* source height & width */
102
    int allocated;
103
    SDL_TimerID timer_id;
104
    enum PixelFormat pix_fmt;
105

    
106
#if CONFIG_AVFILTER
107
    AVFilterPicRef *picref;
108
#endif
109
} VideoPicture;
110

    
111
typedef struct SubPicture {
112
    double pts; /* presentation time stamp for this picture */
113
    AVSubtitle sub;
114
} SubPicture;
115

    
116
enum {
117
    AV_SYNC_AUDIO_MASTER, /* default choice */
118
    AV_SYNC_VIDEO_MASTER,
119
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120
};
121

    
122
typedef struct VideoState {
123
    SDL_Thread *parse_tid;
124
    SDL_Thread *video_tid;
125
    AVInputFormat *iformat;
126
    int no_background;
127
    int abort_request;
128
    int paused;
129
    int last_paused;
130
    int seek_req;
131
    int seek_flags;
132
    int64_t seek_pos;
133
    int64_t seek_rel;
134
    int read_pause_return;
135
    AVFormatContext *ic;
136
    int dtg_active_format;
137

    
138
    int audio_stream;
139

    
140
    int av_sync_type;
141
    double external_clock; /* external clock base */
142
    int64_t external_clock_time;
143

    
144
    double audio_clock;
145
    double audio_diff_cum; /* used for AV difference average computation */
146
    double audio_diff_avg_coef;
147
    double audio_diff_threshold;
148
    int audio_diff_avg_count;
149
    AVStream *audio_st;
150
    PacketQueue audioq;
151
    int audio_hw_buf_size;
152
    /* samples output by the codec. we reserve more space for avsync
153
       compensation */
154
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
155
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156
    uint8_t *audio_buf;
157
    unsigned int audio_buf_size; /* in bytes */
158
    int audio_buf_index; /* in bytes */
159
    AVPacket audio_pkt_temp;
160
    AVPacket audio_pkt;
161
    enum SampleFormat audio_src_fmt;
162
    AVAudioConvert *reformat_ctx;
163

    
164
    int show_audio; /* if true, display audio samples */
165
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
166
    int sample_array_index;
167
    int last_i_start;
168
    RDFTContext *rdft;
169
    int rdft_bits;
170
    int xpos;
171

    
172
    SDL_Thread *subtitle_tid;
173
    int subtitle_stream;
174
    int subtitle_stream_changed;
175
    AVStream *subtitle_st;
176
    PacketQueue subtitleq;
177
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
178
    int subpq_size, subpq_rindex, subpq_windex;
179
    SDL_mutex *subpq_mutex;
180
    SDL_cond *subpq_cond;
181

    
182
    double frame_timer;
183
    double frame_last_pts;
184
    double frame_last_delay;
185
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
186
    int video_stream;
187
    AVStream *video_st;
188
    PacketQueue videoq;
189
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
190
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
191
    int64_t video_current_pos;                   ///<current displayed file pos
192
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
193
    int pictq_size, pictq_rindex, pictq_windex;
194
    SDL_mutex *pictq_mutex;
195
    SDL_cond *pictq_cond;
196
#if !CONFIG_AVFILTER
197
    struct SwsContext *img_convert_ctx;
198
#endif
199

    
200
    //    QETimer *video_timer;
201
    char filename[1024];
202
    int width, height, xleft, ytop;
203

    
204
    int64_t faulty_pts;
205
    int64_t faulty_dts;
206
    int64_t last_dts_for_fault_detection;
207
    int64_t last_pts_for_fault_detection;
208

    
209
#if CONFIG_AVFILTER
210
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
211
#endif
212
} VideoState;
213

    
214
static void show_help(void);
215
static int audio_write_get_buf_size(VideoState *is);
216

    
217
/* options specified by the user */
218
static AVInputFormat *file_iformat;
219
static const char *input_filename;
220
static int fs_screen_width;
221
static int fs_screen_height;
222
static int screen_width = 0;
223
static int screen_height = 0;
224
static int frame_width = 0;
225
static int frame_height = 0;
226
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
227
static int audio_disable;
228
static int video_disable;
229
static int wanted_stream[CODEC_TYPE_NB]={
230
    [CODEC_TYPE_AUDIO]=-1,
231
    [CODEC_TYPE_VIDEO]=-1,
232
    [CODEC_TYPE_SUBTITLE]=-1,
233
};
234
static int seek_by_bytes=-1;
235
static int display_disable;
236
static int show_status = 1;
237
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
238
static int64_t start_time = AV_NOPTS_VALUE;
239
static int debug = 0;
240
static int debug_mv = 0;
241
static int step = 0;
242
static int thread_count = 1;
243
static int workaround_bugs = 1;
244
static int fast = 0;
245
static int genpts = 0;
246
static int lowres = 0;
247
static int idct = FF_IDCT_AUTO;
248
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
249
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
250
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
251
static int error_recognition = FF_ER_CAREFUL;
252
static int error_concealment = 3;
253
static int decoder_reorder_pts= -1;
254
static int autoexit;
255
#if CONFIG_AVFILTER
256
static char *vfilters = NULL;
257
#endif
258

    
259
/* current context */
260
static int is_full_screen;
261
static VideoState *cur_stream;
262
static int64_t audio_callback_time;
263

    
264
static AVPacket flush_pkt;
265

    
266
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
267
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
268
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
269

    
270
static SDL_Surface *screen;
271

    
272
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
273

    
274
/* packet queue handling */
275
/* Initialize a packet queue: zero every field, create the SDL
 * synchronization primitives, then prime the queue with the global
 * flush packet so the first reader sees a flush marker. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
282

    
283
/* Drop every queued packet, releasing both the packet data and the
 * list nodes, and reset the queue counters to empty.  The queue mutex
 * is held for the whole operation. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->last_pkt   = NULL;
    q->first_pkt  = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
299

    
300
/* Tear down a packet queue: flush any remaining packets, then destroy
 * the mutex and condition variable created by packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
306

    
307
/* Append a packet to the tail of the queue and wake one waiting reader.
 * Every packet except the static flush marker is duplicated first so
 * the queue owns its data.  Returns 0 on success, -1 on allocation or
 * duplication failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* take ownership of the packet data; flush_pkt is a sentinel and
       must not be duplicated */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(*entry));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
338

    
339
/* Put the queue into the aborted state and wake any thread blocked in
 * packet_queue_get() so it can observe the abort and return. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
349

    
350
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
351
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
352
{
353
    AVPacketList *pkt1;
354
    int ret;
355

    
356
    SDL_LockMutex(q->mutex);
357

    
358
    for(;;) {
359
        if (q->abort_request) {
360
            ret = -1;
361
            break;
362
        }
363

    
364
        pkt1 = q->first_pkt;
365
        if (pkt1) {
366
            q->first_pkt = pkt1->next;
367
            if (!q->first_pkt)
368
                q->last_pkt = NULL;
369
            q->nb_packets--;
370
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
371
            *pkt = pkt1->pkt;
372
            av_free(pkt1);
373
            ret = 1;
374
            break;
375
        } else if (!block) {
376
            ret = 0;
377
            break;
378
        } else {
379
            SDL_CondWait(q->cond, q->mutex);
380
        }
381
    }
382
    SDL_UnlockMutex(q->mutex);
383
    return ret;
384
}
385

    
386
/* Fill a solid rectangle on the given SDL surface. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { x, y, w, h };

    SDL_FillRect(screen, &rect, color);
}
396

    
397
#if 0
398
/* draw only the border of a rectangle */
399
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
400
{
401
    int w1, w2, h1, h2;
402

403
    /* fill the background */
404
    w1 = x;
405
    if (w1 < 0)
406
        w1 = 0;
407
    w2 = s->width - (x + w);
408
    if (w2 < 0)
409
        w2 = 0;
410
    h1 = y;
411
    if (h1 < 0)
412
        h1 = 0;
413
    h2 = s->height - (y + h);
414
    if (h2 < 0)
415
        h2 = 0;
416
    fill_rectangle(screen,
417
                   s->xleft, s->ytop,
418
                   w1, s->height,
419
                   color);
420
    fill_rectangle(screen,
421
                   s->xleft + s->width - w2, s->ytop,
422
                   w2, s->height,
423
                   color);
424
    fill_rectangle(screen,
425
                   s->xleft + w1, s->ytop,
426
                   s->width - w1 - w2, h1,
427
                   color);
428
    fill_rectangle(screen,
429
                   s->xleft + w1, s->ytop + s->height - h2,
430
                   s->width - w1 - w2, h2,
431
                   color);
432
}
433
#endif
434

    
435
/* Blend newp over oldp with alpha a (0..255); s is the scale shift used
 * when oldp/newp hold the sum of 2^s samples (subsampled chroma). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB pixel at address s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Read the palette index at address s and unpack the 32-bit AYUV
 * palette entry into y, u, v, a. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a back into a 32-bit word at address d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel: subtitle bitmaps are 8-bit palettized */
#define BPP 1
463

    
464
/* Alpha-blend a palettized subtitle rectangle onto a YUV 4:2:0 picture.
 *
 * dst        destination picture (planar YUV, chroma subsampled 2x2)
 * rect       subtitle rectangle; pict.data[0] holds palette indices and
 *            pict.data[1] the 32-bit AYUV palette (see YUVA_IN)
 * imgw/imgh  destination dimensions used to clip the rectangle
 *
 * The rectangle is processed two rows at a time so that each chroma
 * sample accumulates up to four luma-site contributions (u1/v1/a1);
 * leading/trailing odd rows and columns are handled separately.
 *
 * Fix vs. the previous revision: in the odd-height two-pixel loop the
 * chroma blend used the last pixel's u/v instead of the accumulated
 * u1/v1, which is inconsistent with every other two-pixel path and
 * slightly discolored the bottom row of odd-height subtitles.
 */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle against the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma row is shared with the row above, so each
       chroma sample gets only this row's contribution (shift 0/1) */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: two rows at a time, chroma averaged over 2x2 blocks */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* four luma sites contributed -> shift by 2 */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* FIX: blend the accumulated chroma (u1/v1), not the last
               pixel's u/v, to match the other two-pixel paths */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
663

    
664
/* Release all storage owned by a subtitle picture: every rectangle's
 * pixel and palette buffers, the rectangles themselves, the rectangle
 * array, then reset the AVSubtitle to an empty state for reuse. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++) {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
679

    
680
/* Display the picture at the read index of the picture queue on the SDL
 * overlay: compute the display aspect ratio, blend any pending subtitle
 * into the overlay, then center and show the scaled rectangle. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* with filters, the pixel aspect comes from the filter output */
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the stream-level aspect, fall back to the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect to display aspect */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time is reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): planes 1 and 2 are swapped here —
                       presumably because the overlay stores V before U
                       (YV12); confirm against the overlay format used
                       when the bmp is created */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, keeping the aspect ratio and
           forcing even dimensions */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
801

    
802
/* Mathematical modulo: unlike C's %, the result is always in [0, b)
 * for b > 0, even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
810

    
811
/* Render the audio visualization: oscilloscope waveform when
 * s->show_audio == 1, otherwise an RDFT spectrogram column.
 *
 * Fix: time_diff was declared int16_t, but it holds the difference of
 * two av_gettime() values in microseconds; int16_t wraps after ~33 ms,
 * corrupting the computed sample delay on nearly every refresh.  It is
 * now int64_t, matching av_gettime()'s return type. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, for the RDFT size */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= data_used / 2;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a trigger point (zero crossing with maximum
               amplitude drop) so the waveform is stable on screen */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* oscilloscope: one waveform strip per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between the channel strips */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrogram: draw one column per refresh, scrolling right */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* window the samples (1 - w^2, a parabolic window) */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
945

    
946
/* Open (or re-open) the SDL video surface, sized from the first available
 * source: forced fullscreen/window dimensions, the filter/codec output
 * size, or a 640x480 fallback. Returns 0 on success, -1 on failure. */
static int video_open(VideoState *is){
    int width, height;
    int flags = SDL_HWSURFACE | SDL_ASYNCBLIT | SDL_HWACCEL;

    flags |= is_full_screen ? SDL_FULLSCREEN : SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        width  = fs_screen_width;
        height = fs_screen_height;
    } else if (!is_full_screen && screen_width) {
        width  = screen_width;
        height = screen_height;
#if CONFIG_AVFILTER
    } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
        width  = is->out_video_filter->inputs[0]->w;
        height = is->out_video_filter->inputs[0]->h;
#else
    } else if (is->video_st && is->video_st->codec->width) {
        width  = is->video_st->codec->width;
        height = is->video_st->codec->height;
#endif
    } else {
        /* nothing to derive a size from yet */
        width  = 640;
        height = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(width, height, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(width, height, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width  = screen->w;
    is->height = screen->h;

    return 0;
}
989

    
990
/* display the current picture, if any */
991
static void video_display(VideoState *is)
992
{
993
    if(!screen)
994
        video_open(cur_stream);
995
    if (is->audio_st && is->show_audio)
996
        video_audio_display(is);
997
    else if (is->video_st)
998
        video_image_display(is);
999
}
1000

    
1001
/* SDL timer callback: post a refresh event to the main event loop. */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event refresh_event;

    refresh_event.type       = FF_REFRESH_EVENT;
    refresh_event.user.data1 = opaque;
    SDL_PushEvent(&refresh_event);
    return 0; /* 0 means stop timer */
}
1009

    
1010
/* schedule a video refresh in 'delay' ms */
1011
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
1012
{
1013
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
1014
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
1015
}
1016

    
1017
/* get the current audio clock value */
1018
static double get_audio_clock(VideoState *is)
1019
{
1020
    double pts;
1021
    int hw_buf_size, bytes_per_sec;
1022
    pts = is->audio_clock;
1023
    hw_buf_size = audio_write_get_buf_size(is);
1024
    bytes_per_sec = 0;
1025
    if (is->audio_st) {
1026
        bytes_per_sec = is->audio_st->codec->sample_rate *
1027
            2 * is->audio_st->codec->channels;
1028
    }
1029
    if (bytes_per_sec)
1030
        pts -= (double)hw_buf_size / bytes_per_sec;
1031
    return pts;
1032
}
1033

    
1034
/* get the current video clock value */
1035
static double get_video_clock(VideoState *is)
1036
{
1037
    if (is->paused) {
1038
        return is->video_current_pts;
1039
    } else {
1040
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1041
    }
1042
}
1043

    
1044
/* get the current external clock value */
1045
static double get_external_clock(VideoState *is)
1046
{
1047
    int64_t ti;
1048
    ti = av_gettime();
1049
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1050
}
1051

    
1052
/* get the current master clock value */
1053
static double get_master_clock(VideoState *is)
1054
{
1055
    double val;
1056

    
1057
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1058
        if (is->video_st)
1059
            val = get_video_clock(is);
1060
        else
1061
            val = get_audio_clock(is);
1062
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1063
        if (is->audio_st)
1064
            val = get_audio_clock(is);
1065
        else
1066
            val = get_video_clock(is);
1067
    } else {
1068
        val = get_external_clock(is);
1069
    }
1070
    return val;
1071
}
1072

    
1073
/* seek in the stream */
1074
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1075
{
1076
    if (!is->seek_req) {
1077
        is->seek_pos = pos;
1078
        is->seek_rel = rel;
1079
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1080
        if (seek_by_bytes)
1081
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1082
        is->seek_req = 1;
1083
    }
1084
}
1085

    
1086
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance the frame timer by the time spent paused so
           the next frame is not considered late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): re-derive the current
               pts from the stored drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift against the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1098

    
1099
/* Compute how long (in seconds) to wait before presenting the frame whose
 * pts is frame_current_pts, adjusting the nominal inter-frame delay to
 * follow the master synchronisation source when video is a slave clock. */
static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;          /* video lags: show it immediately */
            else if (diff >= sync_threshold)
                delay = 2 * delay;  /* video ahead: hold the frame longer */
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    /* NOTE(review): 'diff' is only assigned on the slave-clock path above,
       so this trace can read it uninitialized when video is the master */
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
1148

    
1149
/* called to display each frame */
1150
static void video_refresh_timer(void *opaque)
1151
{
1152
    VideoState *is = opaque;
1153
    VideoPicture *vp;
1154

    
1155
    SubPicture *sp, *sp2;
1156

    
1157
    if (is->video_st) {
1158
        if (is->pictq_size == 0) {
1159
            fprintf(stderr, "Internal error detected in the SDL timer\n");
1160
        } else {
1161
            /* dequeue the picture */
1162
            vp = &is->pictq[is->pictq_rindex];
1163

    
1164
            /* update current video pts */
1165
            is->video_current_pts = vp->pts;
1166
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1167
            is->video_current_pos = vp->pos;
1168

    
1169
            if(is->subtitle_st) {
1170
                if (is->subtitle_stream_changed) {
1171
                    SDL_LockMutex(is->subpq_mutex);
1172

    
1173
                    while (is->subpq_size) {
1174
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1175

    
1176
                        /* update queue size and signal for next picture */
1177
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1178
                            is->subpq_rindex = 0;
1179

    
1180
                        is->subpq_size--;
1181
                    }
1182
                    is->subtitle_stream_changed = 0;
1183

    
1184
                    SDL_CondSignal(is->subpq_cond);
1185
                    SDL_UnlockMutex(is->subpq_mutex);
1186
                } else {
1187
                    if (is->subpq_size > 0) {
1188
                        sp = &is->subpq[is->subpq_rindex];
1189

    
1190
                        if (is->subpq_size > 1)
1191
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1192
                        else
1193
                            sp2 = NULL;
1194

    
1195
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1196
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1197
                        {
1198
                            free_subpicture(sp);
1199

    
1200
                            /* update queue size and signal for next picture */
1201
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1202
                                is->subpq_rindex = 0;
1203

    
1204
                            SDL_LockMutex(is->subpq_mutex);
1205
                            is->subpq_size--;
1206
                            SDL_CondSignal(is->subpq_cond);
1207
                            SDL_UnlockMutex(is->subpq_mutex);
1208
                        }
1209
                    }
1210
                }
1211
            }
1212

    
1213
            /* display picture */
1214
            video_display(is);
1215

    
1216
            /* update queue size and signal for next picture */
1217
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1218
                is->pictq_rindex = 0;
1219

    
1220
            SDL_LockMutex(is->pictq_mutex);
1221
            vp->timer_id= 0;
1222
            is->pictq_size--;
1223
            SDL_CondSignal(is->pictq_cond);
1224
            SDL_UnlockMutex(is->pictq_mutex);
1225
        }
1226
    } else if (is->audio_st) {
1227
        /* draw the next audio frame */
1228

    
1229
        schedule_refresh(is, 40);
1230

    
1231
        /* if only audio stream, then display the audio bars (better
1232
           than nothing, just to test the implementation */
1233

    
1234
        /* display picture */
1235
        video_display(is);
1236
    } else {
1237
        schedule_refresh(is, 100);
1238
    }
1239
    if (show_status) {
1240
        static int64_t last_time;
1241
        int64_t cur_time;
1242
        int aqsize, vqsize, sqsize;
1243
        double av_diff;
1244

    
1245
        cur_time = av_gettime();
1246
        if (!last_time || (cur_time - last_time) >= 30000) {
1247
            aqsize = 0;
1248
            vqsize = 0;
1249
            sqsize = 0;
1250
            if (is->audio_st)
1251
                aqsize = is->audioq.size;
1252
            if (is->video_st)
1253
                vqsize = is->videoq.size;
1254
            if (is->subtitle_st)
1255
                sqsize = is->subtitleq.size;
1256
            av_diff = 0;
1257
            if (is->audio_st && is->video_st)
1258
                av_diff = get_audio_clock(is) - get_video_clock(is);
1259
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
1260
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1261
            fflush(stdout);
1262
            last_time = cur_time;
1263
        }
1264
    }
1265
}
1266

    
1267
/* allocate a picture (needs to do that in main thread to avoid
1268
   potential locking problems */
1269
static void alloc_picture(void *opaque)
1270
{
1271
    VideoState *is = opaque;
1272
    VideoPicture *vp;
1273

    
1274
    vp = &is->pictq[is->pictq_windex];
1275

    
1276
    if (vp->bmp)
1277
        SDL_FreeYUVOverlay(vp->bmp);
1278

    
1279
#if CONFIG_AVFILTER
1280
    if (vp->picref)
1281
        avfilter_unref_pic(vp->picref);
1282
    vp->picref = NULL;
1283

    
1284
    vp->width   = is->out_video_filter->inputs[0]->w;
1285
    vp->height  = is->out_video_filter->inputs[0]->h;
1286
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1287
#else
1288
    vp->width   = is->video_st->codec->width;
1289
    vp->height  = is->video_st->codec->height;
1290
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1291
#endif
1292

    
1293
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1294
                                   SDL_YV12_OVERLAY,
1295
                                   screen);
1296

    
1297
    SDL_LockMutex(is->pictq_mutex);
1298
    vp->allocated = 1;
1299
    SDL_CondSignal(is->pictq_cond);
1300
    SDL_UnlockMutex(is->pictq_mutex);
1301
}
1302

    
1303
/**
 * Queue a decoded frame: copy/convert it into the SDL YUV overlay of the
 * next write slot and schedule its presentation with a refresh timer.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 when the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        /* keep the filter picref alive for as long as the slot is shown */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores the planes as Y, V, U: swap the chroma planes */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert whatever the decoder produced into YUV420P */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        //We must schedule in a mutex as we must store the timer id before the timer dies or might end up freeing a alraedy freed id
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1420

    
1421
/**
1422
 * compute the exact PTS for the picture if it is omitted in the stream
1423
 * @param pts1 the dts of the pkt / pts of the frame
1424
 */
1425
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1426
{
1427
    double frame_delay, pts;
1428

    
1429
    pts = pts1;
1430

    
1431
    if (pts != 0) {
1432
        /* update video clock with pts, if present */
1433
        is->video_clock = pts;
1434
    } else {
1435
        pts = is->video_clock;
1436
    }
1437
    /* update video clock for next frame */
1438
    frame_delay = av_q2d(is->video_st->codec->time_base);
1439
    /* for MPEG2, the frame can be repeated, so we update the
1440
       clock accordingly */
1441
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1442
    is->video_clock += frame_delay;
1443

    
1444
#if defined(DEBUG_SYNC) && 0
1445
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1446
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1447
#endif
1448
    return queue_picture(is, src_frame, pts, pos);
1449
}
1450

    
1451
/* Fetch one packet from the video queue and decode it.
 * Returns 1 when a picture was produced, 0 when more packets are needed
 * (or after handling a flush packet), -1 when the queue was aborted.
 * *pts receives the chosen timestamp in stream time_base units. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            /* a seek happened: reset the decoder and drop queued pictures */
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                if(is->pictq[i].timer_id){
                    SDL_RemoveTimer(is->pictq[i].timer_id);
                    is->pictq[i].timer_id=0;
                    schedule_refresh(is, 1);
                }
            }
            /* wait until the refresh handler consumed all queued pictures */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* restart the timestamp fault detection and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;

            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotone dts/pts to decide later which to trust */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it looks more reliable
           than dts, or when dts is absent; otherwise fall back to dts, 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture)
        return 1;
    return 0;
}
1519

    
1520
#if CONFIG_AVFILTER
1521
/* private context of the ffplay source filter: the player state plus a
   scratch frame that the decoder writes into */
typedef struct {
    VideoState *is;
    AVFrame *frame;
} FilterPriv;
1525

    
1526
/* init callback of the source filter: store the player state handed in via
   opaque and allocate the decode frame; fails without a VideoState. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;

    if (opaque == NULL)
        return -1;

    priv->is    = opaque;
    priv->frame = avcodec_alloc_frame();
    return 0;
}
1536

    
1537
/* uninit callback of the source filter: release the scratch frame */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;

    av_free(priv->frame);
}
1542

    
1543
/* request_frame callback of the source filter: decode one picture and push
   it into the filter chain. Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding until a picture comes out (ret==1) or abort (ret<0) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* hand the picture to the downstream filter in one full-height slice */
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}
1572

    
1573
/* advertise exactly the decoder's pixel format to the filter chain */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[2];

    pix_fmts[0] = priv->is->video_st->codec->pix_fmt;
    pix_fmts[1] = PIX_FMT_NONE;   /* list terminator */

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1583

    
1584
/* propagate the decoder's picture size to the source filter's output link */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVCodecContext *dec = priv->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;
    return 0;
}
1594

    
1595
/* source filter feeding decoded ffplay frames into the graph */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* no inputs: this is a pure source */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1613

    
1614
/* end_frame callback of the sink filter: intentionally a no-op, frames are
   pulled out of the input link by get_filtered_video_frame() */
static void output_end_frame(AVFilterLink *link)
{
}
1617

    
1618
/* the sink only accepts YUV420P, matching the SDL YV12 overlay */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[2];

    pix_fmts[0] = PIX_FMT_YUV420P;
    pix_fmts[1] = PIX_FMT_NONE;   /* list terminator */

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1625

    
1626
/* Pull one picture out of the filter graph sink. Returns 1 on success,
 * -1 when no frame could be produced. Ownership of the picref moves into
 * frame->opaque; the caller is responsible for unreferencing it. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts)
{
    AVFilterLink *link = ctx->inputs[0];
    AVFilterPicRef *pic;

    if (avfilter_request_frame(link))
        return -1;

    pic = link->cur_pic;
    if (!pic)
        return -1;
    link->cur_pic = NULL;   /* take ownership of the reference */

    frame->opaque = pic;
    *pts          = pic->pts;

    /* expose the picture planes through the AVFrame */
    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1645

    
1646
/* sink filter from which the video thread pulls filtered frames */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no outputs: this is a pure sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1659
#endif  /* CONFIG_AVFILTER */
1660

    
1661
/* Video decoding thread: (optionally) builds the filter graph
   ffplay_input -> [user filters] -> ffplay_output, then loops pulling
   decoded frames and handing them to the picture queue. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* splice the user-supplied filter chain between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    /* main decode loop */
    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no picture yet, keep decoding */
        if (!ret)
            continue;

        /* convert from stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in frame-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1751

    
1752
/* Subtitle decoding thread: pulls packets from the subtitle queue, decodes
   them, converts bitmap subtitle palettes from RGBA to CCIR YUVA in place,
   and stores the result in the subtitle picture queue. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            /* a seek happened: just reset the decoder state */
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subtitle picture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* only bitmap subtitles (format == 0) are handled */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1826

    
1827
/* copy samples for viewing in editor window */
1828
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1829
{
1830
    int size, len, channels;
1831

    
1832
    channels = is->audio_st->codec->channels;
1833

    
1834
    size = samples_size / sizeof(short);
1835
    while (size > 0) {
1836
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1837
        if (len > size)
1838
            len = size;
1839
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1840
        samples += len;
1841
        is->sample_array_index += len;
1842
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1843
            is->sample_array_index = 0;
1844
        size -= len;
1845
    }
1846
}
1847

    
1848
/* return the new audio buffer size (samples can be added or deleted
1849
   to get better sync if video or external master clock) */
1850
static int synchronize_audio(VideoState *is, short *samples,
1851
                             int samples_size1, double pts)
1852
{
1853
    int n, samples_size;
1854
    double ref_clock;
1855

    
1856
    n = 2 * is->audio_st->codec->channels;
1857
    samples_size = samples_size1;
1858

    
1859
    /* if not master, then we try to remove or add samples to correct the clock */
1860
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1861
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1862
        double diff, avg_diff;
1863
        int wanted_size, min_size, max_size, nb_samples;
1864

    
1865
        ref_clock = get_master_clock(is);
1866
        diff = get_audio_clock(is) - ref_clock;
1867

    
1868
        if (diff < AV_NOSYNC_THRESHOLD) {
1869
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1870
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1871
                /* not enough measures to have a correct estimate */
1872
                is->audio_diff_avg_count++;
1873
            } else {
1874
                /* estimate the A-V difference */
1875
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1876

    
1877
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1878
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1879
                    nb_samples = samples_size / n;
1880

    
1881
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1882
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1883
                    if (wanted_size < min_size)
1884
                        wanted_size = min_size;
1885
                    else if (wanted_size > max_size)
1886
                        wanted_size = max_size;
1887

    
1888
                    /* add or remove samples to correction the synchro */
1889
                    if (wanted_size < samples_size) {
1890
                        /* remove samples */
1891
                        samples_size = wanted_size;
1892
                    } else if (wanted_size > samples_size) {
1893
                        uint8_t *samples_end, *q;
1894
                        int nb;
1895

    
1896
                        /* add samples */
1897
                        nb = (samples_size - wanted_size);
1898
                        samples_end = (uint8_t *)samples + samples_size - n;
1899
                        q = samples_end + n;
1900
                        while (nb > 0) {
1901
                            memcpy(q, samples_end, n);
1902
                            q += n;
1903
                            nb -= n;
1904
                        }
1905
                        samples_size = wanted_size;
1906
                    }
1907
                }
1908
#if 0
1909
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1910
                       diff, avg_diff, samples_size - samples_size1,
1911
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1912
#endif
1913
            }
1914
        } else {
1915
            /* too big difference : may be initial PTS errors, so
1916
               reset A-V filter */
1917
            is->audio_diff_avg_count = 0;
1918
            is->audio_diff_cum = 0;
1919
        }
1920
    }
1921

    
1922
    return samples_size;
1923
}
1924

    
1925
/* decode one audio frame and returns its uncompressed size */
/* On success, points is->audio_buf at the decoded (and, if needed,
   S16-converted) data, stores its presentation time in *pts_ptr and
   advances is->audio_clock. Returns -1 when paused or aborting.
   Loops: drains the current packet frame by frame, then pulls the next
   packet from is->audioq (blocking). */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* len1 = bytes of the packet consumed by the decoder */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter if the decoder's sample format changed */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert to interleaved S16 into audio_buf2 when required */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this chunk */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* the flush packet is queued after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2027

    
2028
/* get the current audio output buffer size, in samples. With SDL, we
2029
   cannot have a precise information */
2030
static int audio_write_get_buf_size(VideoState *is)
2031
{
2032
    return is->audio_buf_size - is->audio_buf_index;
2033
}
2034

    
2035

    
2036
/* prepare a new audio buffer */
2037
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2038
{
2039
    VideoState *is = opaque;
2040
    int audio_size, len1;
2041
    double pts;
2042

    
2043
    audio_callback_time = av_gettime();
2044

    
2045
    while (len > 0) {
2046
        if (is->audio_buf_index >= is->audio_buf_size) {
2047
           audio_size = audio_decode_frame(is, &pts);
2048
           if (audio_size < 0) {
2049
                /* if error, just output silence */
2050
               is->audio_buf = is->audio_buf1;
2051
               is->audio_buf_size = 1024;
2052
               memset(is->audio_buf, 0, is->audio_buf_size);
2053
           } else {
2054
               if (is->show_audio)
2055
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2056
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2057
                                              pts);
2058
               is->audio_buf_size = audio_size;
2059
           }
2060
           is->audio_buf_index = 0;
2061
        }
2062
        len1 = is->audio_buf_size - is->audio_buf_index;
2063
        if (len1 > len)
2064
            len1 = len;
2065
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2066
        len -= len1;
2067
        stream += len1;
2068
        is->audio_buf_index += len1;
2069
    }
2070
}
2071

    
2072
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for ic->streams[stream_index], applies the
   command-line codec options, opens SDL audio output for audio streams,
   and spawns the video/subtitle decoding thread as appropriate. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    /* downmix to at most 2 channels at the decoder level */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* configure the decoder from the command-line globals
       (debug, lowres, skip_*, error handling, threading) */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    /* per-codec-type -option overrides, applied before avcodec_open() */
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2171

    
2172
/* Close one open stream component: abort its packet queue, unblock and
   join its decoding thread (or close SDL audio), free queue contents,
   close the codec and clear the VideoState references. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread (the callback consumer) */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* discard any further demuxed packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2243

    
2244
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
/* set/cleared by decode_thread(), read by decode_interrupt_cb() to
   abort blocking libavformat I/O */
static VideoState *global_video_state;
2247

    
2248
static int decode_interrupt_cb(void)
2249
{
2250
    return (global_video_state && global_video_state->abort_request);
2251
}
2252

    
2253
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, selects and opens the audio/video/
   subtitle stream components, then loops reading packets and dispatching
   them to the per-stream packet queues, handling pause, seeking and EOF.
   Always returns 0; errors are reported via an FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];
    int st_count[CODEC_TYPE_NB]={0};
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    /* -1 == no stream selected yet for that codec type */
    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow aborting blocking I/O via decode_interrupt_cb() */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw/headerless inputs taken from the command line */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per codec type, the wanted stream (or the one with the most
       frames seen during probing) */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    /* no video: drive the display with a timer and show the audio vis */
    if(ret<0) {
        /* add the refresh timer to draw the picture */
        schedule_refresh(is, 40);

        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and inject a flush packet so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* queue an empty packet so the video decoder can flush out
               its remaining buffered frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* dispatch the packet to the owning stream's queue */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* notify the main loop of a fatal error / autoexit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2504

    
2505
/* Allocate a VideoState for 'filename', create the picture/subtitle
   queue synchronization primitives and start the demuxing thread.
   Returns NULL on failure (allocation or thread creation).
   Fix: on SDL_CreateThread() failure the mutexes and condition
   variables were leaked; they are now destroyed before freeing. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* release the synchronization primitives before freeing the state */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2532

    
2533
/* Stop the demuxing thread and free every resource owned by 'is':
   queued pictures/overlays, synchronization primitives, the scaler
   context (non-avfilter builds) and the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2565

    
2566
/* Switch to the next stream of the given codec type, wrapping around.
   For subtitles, wrapping past the last stream selects -1, i.e. turns
   subtitles off; audio/video always keep one stream selected. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing open to cycle from (subtitles may legitimately be -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* full loop without a usable candidate: keep the current stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2614

    
2615

    
2616
static void toggle_full_screen(void)
2617
{
2618
    is_full_screen = !is_full_screen;
2619
    if (!fs_screen_width) {
2620
        /* use default SDL method */
2621
//        SDL_WM_ToggleFullScreen(screen);
2622
    }
2623
    video_open(cur_stream);
2624
}
2625

    
2626
static void toggle_pause(void)
2627
{
2628
    if (cur_stream)
2629
        stream_pause(cur_stream);
2630
    step = 0;
2631
}
2632

    
2633
static void step_to_next_frame(void)
2634
{
2635
    if (cur_stream) {
2636
        /* if the stream is paused unpause it, then step */
2637
        if (cur_stream->paused)
2638
            stream_pause(cur_stream);
2639
    }
2640
    step = 1;
2641
}
2642

    
2643
static void do_exit(void)
2644
{
2645
    int i;
2646
    if (cur_stream) {
2647
        stream_close(cur_stream);
2648
        cur_stream = NULL;
2649
    }
2650
    for (i = 0; i < CODEC_TYPE_NB; i++)
2651
        av_free(avcodec_opts[i]);
2652
    av_free(avformat_opts);
2653
    av_free(sws_opts);
2654
#if CONFIG_AVFILTER
2655
    avfilter_uninit();
2656
#endif
2657
    if (show_status)
2658
        printf("\n");
2659
    SDL_Quit();
2660
    exit(0);
2661
}
2662

    
2663
static void toggle_audio_display(void)
2664
{
2665
    if (cur_stream) {
2666
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2667
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2668
        fill_rectangle(screen,
2669
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2670
                    bgcolor);
2671
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2672
    }
2673
}
2674

    
2675
/* Main GUI event loop: dispatches SDL keyboard, mouse, resize and the
 * application-defined FF_* events. Never returns; exit happens through
 * do_exit() on quit keys/events. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;   /* incr: seek delta; pos: seek target; frac: mouse position as fraction of width */

    for(;;) {
        double x;             /* mouse x coordinate for click/drag seeking */
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; all four paths converge on do_seek */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* derive the current byte position: prefer video, then
                         * audio packet position, else the raw I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the seconds-based increment to bytes; the
                         * 180000.0 fallback assumes ~1.44 Mbit/s when no
                         * bitrate is known */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            /* click or drag: seek to the fraction of the window width */
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;   /* ignore motion unless a button is held */
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* NOTE(review): url_fsize can return a negative error code;
                     * storing it in a uint64_t would wrap — verify upstream */
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* total duration, split into h:m:s for the status message */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    /* absolute timestamp in AV_TIME_BASE units, offset by the
                     * container's start time when known */
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)open the window and allocate
             * the picture buffer on the main (SDL) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2812

    
2813
static void opt_frame_size(const char *arg)
2814
{
2815
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2816
        fprintf(stderr, "Incorrect frame size\n");
2817
        exit(1);
2818
    }
2819
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2820
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2821
        exit(1);
2822
    }
2823
}
2824

    
2825
static int opt_width(const char *opt, const char *arg)
2826
{
2827
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2828
    return 0;
2829
}
2830

    
2831
static int opt_height(const char *opt, const char *arg)
2832
{
2833
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2834
    return 0;
2835
}
2836

    
2837
static void opt_format(const char *arg)
2838
{
2839
    file_iformat = av_find_input_format(arg);
2840
    if (!file_iformat) {
2841
        fprintf(stderr, "Unknown input format: %s\n", arg);
2842
        exit(1);
2843
    }
2844
}
2845

    
2846
static void opt_frame_pix_fmt(const char *arg)
2847
{
2848
    frame_pix_fmt = av_get_pix_fmt(arg);
2849
}
2850

    
2851
static int opt_sync(const char *opt, const char *arg)
2852
{
2853
    if (!strcmp(arg, "audio"))
2854
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2855
    else if (!strcmp(arg, "video"))
2856
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2857
    else if (!strcmp(arg, "ext"))
2858
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2859
    else {
2860
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2861
        exit(1);
2862
    }
2863
    return 0;
2864
}
2865

    
2866
static int opt_seek(const char *opt, const char *arg)
2867
{
2868
    start_time = parse_time_or_die(opt, arg, 1);
2869
    return 0;
2870
}
2871

    
2872
static int opt_debug(const char *opt, const char *arg)
2873
{
2874
    av_log_set_level(99);
2875
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2876
    return 0;
2877
}
2878

    
2879
static int opt_vismv(const char *opt, const char *arg)
2880
{
2881
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2882
    return 0;
2883
}
2884

    
2885
static int opt_thread_count(const char *opt, const char *arg)
2886
{
2887
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2888
#if !HAVE_THREADS
2889
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2890
#endif
2891
    return 0;
2892
}
2893

    
2894
/* Command-line option table, consumed by parse_options() (cmdutils).
 * OPT_FUNC2 entries take a callback; OPT_BOOL/OPT_INT/OPT_STRING entries
 * write directly to the referenced global. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, }, /* table terminator */
};
2933

    
2934
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2940

    
2941
static void show_help(void)
2942
{
2943
    show_usage();
2944
    show_help_options(options, "Main options:\n",
2945
                      OPT_EXPERT, 0);
2946
    show_help_options(options, "\nAdvanced options:\n",
2947
                      OPT_EXPERT, OPT_EXPERT);
2948
    printf("\nWhile playing:\n"
2949
           "q, ESC              quit\n"
2950
           "f                   toggle full screen\n"
2951
           "p, SPC              pause\n"
2952
           "a                   cycle audio channel\n"
2953
           "v                   cycle video channel\n"
2954
           "t                   cycle subtitle channel\n"
2955
           "w                   show audio waves\n"
2956
           "left/right          seek backward/forward 10 seconds\n"
2957
           "down/up             seek backward/forward 1 minute\n"
2958
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2959
           );
2960
}
2961

    
2962
static void opt_input_file(const char *filename)
2963
{
2964
    if (input_filename) {
2965
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2966
                filename, input_filename);
2967
        exit(1);
2968
    }
2969
    if (!strcmp(filename, "-"))
2970
        filename = "pipe:";
2971
    input_filename = filename;
2972
}
2973

    
2974
/* Program entry point: register libav* components, parse options, initialise
 * SDL, open the input stream and hand control to event_loop() (which never
 * returns; termination goes through do_exit()). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* option contexts used by the "-default" generic option handler */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy swscale context only used to hold user-set scaler options */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player does not use */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet queued on seek to tell decoders to flush; data points
     * at a static string literal and is never freed */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}