/* ffplay.c @ revision 6580d5e3 (extracted from repository web viewer) */

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <math.h>
24
#include <limits.h>
25
#include "libavutil/avstring.h"
26
#include "libavutil/pixdesc.h"
27
#include "libavformat/avformat.h"
28
#include "libavdevice/avdevice.h"
29
#include "libswscale/swscale.h"
30
#include "libavcodec/audioconvert.h"
31
#include "libavcodec/colorspace.h"
32
#include "libavcodec/opt.h"
33
#include "libavcodec/avfft.h"
34

    
35
#if CONFIG_AVFILTER
36
# include "libavfilter/avfilter.h"
37
# include "libavfilter/avfiltergraph.h"
38
# include "libavfilter/graphparser.h"
39
#endif
40

    
41
#include "cmdutils.h"
42

    
43
#include <SDL.h>
44
#include <SDL_thread.h>
45

    
46
#ifdef __MINGW32__
47
#undef main /* We don't want SDL to override our main() */
48
#endif
49

    
50
const char program_name[] = "FFplay";
51
const int program_birth_year = 2003;
52

    
53
//#define DEBUG_SYNC
54

    
55
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
56
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
57
#define MIN_FRAMES 5
58

    
59
/* SDL audio buffer size, in samples. Should be small to have precise
60
   A/V sync as SDL does not have hardware buffer fullness info. */
61
#define SDL_AUDIO_BUFFER_SIZE 1024
62

    
63
/* no AV sync correction is done if below the AV sync threshold */
64
#define AV_SYNC_THRESHOLD 0.01
65
/* no AV correction is done if too big error */
66
#define AV_NOSYNC_THRESHOLD 10.0
67

    
68
/* maximum audio speed change to get correct sync */
69
#define SAMPLE_CORRECTION_PERCENT_MAX 10
70

    
71
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
72
#define AUDIO_DIFF_AVG_NB   20
73

    
74
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
75
#define SAMPLE_ARRAY_SIZE (2*65536)
76

    
77
#if !CONFIG_AVFILTER
78
static int sws_flags = SWS_BICUBIC;
79
#endif
80

    
81
/* Thread-safe FIFO of demuxed packets, shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head/tail */
    int nb_packets;     /* number of queued packets */
    int size;           /* total bytes queued (payload + node overhead) */
    int abort_request;  /* set by packet_queue_abort() to unblock readers */
    SDL_mutex *mutex;   /* protects all fields above */
    SDL_cond *cond;     /* signaled when a packet is added or abort is requested */
} PacketQueue;
89

    
90
#define VIDEO_PICTURE_QUEUE_SIZE 1
91
#define SUBPICTURE_QUEUE_SIZE 4
92

    
93
/* One entry of the decoded-picture queue: a decoded frame uploaded into
   an SDL YUV overlay, plus the timing info needed to display it. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;   /* SDL overlay holding the frame pixels (NULL until allocated) */
    int width, height; /* source height & width */
    int allocated;      /* nonzero once 'bmp' has been (re)allocated for this size */
    SDL_TimerID timer_id;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref; /* reference to the filtered picture (owns pixel data) */
#endif
} VideoPicture;
106

    
107
/* One entry of the decoded-subtitle queue. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; freed by free_subpicture() */
} SubPicture;
111

    
112
/* Master clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
117

    
118
/* Aggregate state for one open media file: demuxer, per-stream decoder
   queues, clocks for A/V sync, and display bookkeeping. One instance is
   shared by the parse, video, subtitle and audio-callback threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;   /* demux/read thread */
    SDL_Thread *video_tid;   /* video decode thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;       /* tells all threads to quit */
    int paused;
    int last_paused;
    int seek_req;            /* set when a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;     /* demuxer context */
    int dtg_active_format;

    int audio_stream;        /* index of the audio stream, or -1 */

    int av_sync_type;        /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding / A-V sync state --- */
    double audio_clock;      /* pts of the last decoded audio sample batch */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;      /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* sample-format converter, lazily created */

    /* --- audio visualization (waveform / RDFT spectrum) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;       /* FFT context for spectrum mode */
    int rdft_bits;
    int xpos;                /* current column of the scrolling spectrogram */

    /* --- subtitle decoding --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video decoding / display --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx; /* pixel-format/scale converter */
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* current display area inside the SDL surface */

    /* timestamp discontinuity detection for pts/dts selection */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif
} VideoState;
209

    
210
/* forward declarations */
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* per-media-type stream index requested by the user (-1 = automatic) */
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1: decide from the container */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at entry of the SDL audio callback */

/* special marker packet queued to request a decoder flush */
static AVPacket flush_pkt;

/* custom SDL event codes */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
269

    
270
/* packet queue handling */
271
/* Zero a PacketQueue, create its mutex/condition pair, and prime it
 * with the global flush marker packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
278

    
279
/* Discard every queued packet and reset the queue's bookkeeping
 * counters; the queue itself stays usable. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
295

    
296
/* Tear a queue down completely: drop all queued packets, then destroy
 * the mutex and condition variable created by packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
302

    
303
/* Append a packet to the tail of the queue.
 * The payload is duplicated first (except for the flush marker) so the
 * queue owns its own copy. Returns 0 on success, -1 on failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* take ownership of the packet data; the flush marker carries none */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    /* link at the tail (the queue may be empty) */
    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;

    q->nb_packets++;
    /* account for the payload plus the list-node overhead */
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */

    /* wake any consumer blocked in packet_queue_get() */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
334

    
335
/* Ask consumers to give up: raise the abort flag and wake any thread
 * blocked in packet_queue_get(). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
345

    
346
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
347
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
348
{
349
    AVPacketList *pkt1;
350
    int ret;
351

    
352
    SDL_LockMutex(q->mutex);
353

    
354
    for(;;) {
355
        if (q->abort_request) {
356
            ret = -1;
357
            break;
358
        }
359

    
360
        pkt1 = q->first_pkt;
361
        if (pkt1) {
362
            q->first_pkt = pkt1->next;
363
            if (!q->first_pkt)
364
                q->last_pkt = NULL;
365
            q->nb_packets--;
366
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
367
            *pkt = pkt1->pkt;
368
            av_free(pkt1);
369
            ret = 1;
370
            break;
371
        } else if (!block) {
372
            ret = 0;
373
            break;
374
        } else {
375
            SDL_CondWait(q->cond, q->mutex);
376
        }
377
    }
378
    SDL_UnlockMutex(q->mutex);
379
    return ret;
380
}
381

    
382
/* Fill the rectangle (x, y, w, h) of 'screen' with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { x, y, w, h };
    SDL_FillRect(screen, &rect, color);
}
392

    
393
#if 0
394
/* draw only the border of a rectangle */
395
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
396
{
397
    int w1, w2, h1, h2;
398

399
    /* fill the background */
400
    w1 = x;
401
    if (w1 < 0)
402
        w1 = 0;
403
    w2 = s->width - (x + w);
404
    if (w2 < 0)
405
        w2 = 0;
406
    h1 = y;
407
    if (h1 < 0)
408
        h1 = 0;
409
    h2 = s->height - (y + h);
410
    if (h2 < 0)
411
        h2 = 0;
412
    fill_rectangle(screen,
413
                   s->xleft, s->ytop,
414
                   w1, s->height,
415
                   color);
416
    fill_rectangle(screen,
417
                   s->xleft + s->width - w2, s->ytop,
418
                   w2, s->height,
419
                   color);
420
    fill_rectangle(screen,
421
                   s->xleft + w1, s->ytop,
422
                   s->width - w1 - w2, h1,
423
                   color);
424
    fill_rectangle(screen,
425
                   s->xleft + w1, s->ytop + s->height - h2,
426
                   s->width - w1 - w2, h2,
427
                   color);
428
}
429
#endif
430

    
431
/* Blend 'newp' over 'oldp' with alpha 'a' (0..255). 's' is a shift that
   lets the caller pass sums of 2^s samples (used when averaging chroma
   over 2 or 4 pixels in blend_subrect). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack the 32-bit pixel at 's' into r, g, b, a components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at 's' and unpack it
   into y, u, v, a (the palette words are packed A|Y|U|V). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a back into one 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
459

    
460
/* Alpha-blend one palettized subtitle rectangle onto a YUV420 picture.
 *
 * dst  - destination picture (planar YUV 4:2:0; luma plus half-resolution
 *        chroma planes)
 * rect - subtitle rectangle; pict.data[0] holds 8-bit palette indices and
 *        pict.data[1] the 32-bit AYUV palette
 * imgw/imgh - destination dimensions used to clip the rectangle
 *
 * Luma is blended per pixel; chroma is blended per 2x2 block, averaging
 * the contributing pixels' u/v/alpha (the >> 1 and >> 2 shifts). Odd
 * leading/trailing rows and columns are handled by dedicated paths.
 *
 * BUG FIX: in the odd-height tail loop the chroma was blended with 'u'
 * and 'v' (the second pixel only) instead of the accumulated 'u1'/'v1',
 * unlike the identical loop in the odd-first-row path. Fixed to use the
 * accumulated values. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* odd first row: chroma rows are shared with pixels above the rect,
       so blend this row on its own */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: process two source rows per iteration so each chroma
       sample averages a full 2x2 block */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* four pixels contributed: shift by 2 when blending chroma */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* was ...cb[0], u, 1) / ...cr[0], v, 1): use the accumulated
               two-pixel sums, matching the odd-first-row loop above */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
659

    
660
/* Release every rectangle owned by a SubPicture and zero its AVSubtitle
 * so the queue slot can be reused. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++) {
        /* free the bitmap, its palette, then the rect itself
           (av_freep also NULLs each slot) */
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
675

    
676
/* Display the picture at the read index of the picture queue: blend any
   due subtitle onto the overlay, compute the letterboxed destination
   rectangle from the aspect ratio, and show the YUV overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* sample aspect ratio comes from the filtered picture reference */
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its start time has been reached)
           directly into the overlay pixels */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* SDL's YV12 overlay stores V before U, hence the
                       swapped plane indices 1 and 2 */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the widest even-sized rectangle with the right aspect ratio
           inside the window, then center it */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
797

    
798
/* Positive modulo: map 'a' into [0, b).
 * C's % operator keeps the dividend's sign, so a negative remainder is
 * shifted up by one period. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
806

    
807
/* Render the audio visualization: waveform (show_audio == 1) or a
 * scrolling RDFT spectrogram (otherwise). While playing, the display
 * start index is back-computed from the amount of audio still buffered
 * so the drawing is centered on the samples currently being heard.
 *
 * BUG FIX: 'time_diff' was declared int16_t, but it holds the elapsed
 * time since the audio callback in microseconds (av_gettime() delta).
 * Anything beyond ~32.7 ms overflowed, corrupting the computed sample
 * delay; widened to int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff; /* microseconds since the last audio callback */
    int rdft_bits, nb_freq;

    /* smallest power of two with 2^rdft_bits >= 2*height */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= data_used / 2;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a rising zero-crossing near the start position so
               successive frames line up and the waveform looks stable */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        /* frozen while paused */
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: clear, then draw one band per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        /* separator lines between channel bands */
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrum mode: one RDFT per channel, drawn as one new column */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* Welch window */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* advance the scroll position, wrapping at the right edge */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
941

    
942
/* (Re)create the SDL output surface.  The size is picked, in priority
 * order: forced fullscreen size, forced window size, the filter-graph /
 * codec frame size, or a 640x480 fallback.
 * Returns 0 on success, -1 if SDL could not set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        /* user forced a fullscreen resolution */
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        /* user forced a window size */
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        /* size of the frames coming out of the filter graph */
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        /* nothing known yet (e.g. audio only): default size */
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    /* remember the size actually granted by SDL (may differ from w/h) */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
985

    
986
/* display the current picture, if any */
987
static void video_display(VideoState *is)
988
{
989
    if(!screen)
990
        video_open(cur_stream);
991
    if (is->audio_st && is->show_audio)
992
        video_audio_display(is);
993
    else if (is->video_st)
994
        video_image_display(is);
995
}
996

    
997
/* SDL timer callback: posts an FF_REFRESH_EVENT carrying 'opaque'
 * (the VideoState) to the main event loop.  Returning 0 makes the
 * timer one-shot. */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event refresh_event;

    refresh_event.type       = FF_REFRESH_EVENT;
    refresh_event.user.data1 = opaque;
    SDL_PushEvent(&refresh_event);
    return 0; /* one-shot: do not re-arm the timer */
}
1005

    
1006
/* schedule a video refresh in 'delay' ms */
1007
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
1008
{
1009
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
1010
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
1011
}
1012

    
1013
/* get the current audio clock value */
1014
static double get_audio_clock(VideoState *is)
1015
{
1016
    double pts;
1017
    int hw_buf_size, bytes_per_sec;
1018
    pts = is->audio_clock;
1019
    hw_buf_size = audio_write_get_buf_size(is);
1020
    bytes_per_sec = 0;
1021
    if (is->audio_st) {
1022
        bytes_per_sec = is->audio_st->codec->sample_rate *
1023
            2 * is->audio_st->codec->channels;
1024
    }
1025
    if (bytes_per_sec)
1026
        pts -= (double)hw_buf_size / bytes_per_sec;
1027
    return pts;
1028
}
1029

    
1030
/* get the current video clock value */
1031
static double get_video_clock(VideoState *is)
1032
{
1033
    if (is->paused) {
1034
        return is->video_current_pts;
1035
    } else {
1036
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1037
    }
1038
}
1039

    
1040
/* get the current external clock value */
1041
static double get_external_clock(VideoState *is)
1042
{
1043
    int64_t ti;
1044
    ti = av_gettime();
1045
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1046
}
1047

    
1048
/* get the current master clock value */
1049
static double get_master_clock(VideoState *is)
1050
{
1051
    double val;
1052

    
1053
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1054
        if (is->video_st)
1055
            val = get_video_clock(is);
1056
        else
1057
            val = get_audio_clock(is);
1058
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1059
        if (is->audio_st)
1060
            val = get_audio_clock(is);
1061
        else
1062
            val = get_video_clock(is);
1063
    } else {
1064
        val = get_external_clock(is);
1065
    }
1066
    return val;
1067
}
1068

    
1069
/* seek in the stream */
1070
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1071
{
1072
    if (!is->seek_req) {
1073
        is->seek_pos = pos;
1074
        is->seek_rel = rel;
1075
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1076
        if (seek_by_bytes)
1077
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1078
        is->seek_req = 1;
1079
    }
1080
}
1081

    
1082
/* Toggle pause.  On resume, re-anchor the frame timer and the video
 * clock so the time spent paused does not count as playback time. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
           paused, recovered from the frozen pts drift */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* read_pause_return is presumably av_read_pause()'s result,
               set by the read thread — TODO confirm; if the demuxer
               supported pausing, re-anchor the pts to "now" */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* recompute drift so get_video_clock() stays continuous */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1094

    
1095
/* Compute the real wall-clock delay (seconds) before displaying the frame
 * with pts 'frame_current_pts', updating the timing state in 'is'.
 * When video is not the master clock, the nominal inter-frame delay is
 * stretched or collapsed to chase the master.  Result is clamped to a
 * minimum of 10 ms. */
static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            /* only correct when the error is plausibly recoverable */
            if (diff <= -sync_threshold)
                delay = 0;            /* video lags: show ASAP */
            else if (diff >= sync_threshold)
                delay = 2 * delay;    /* video leads: hold the frame longer */
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
1144

    
1145
/* called to display each frame */
1146
static void video_refresh_timer(void *opaque)
1147
{
1148
    VideoState *is = opaque;
1149
    VideoPicture *vp;
1150

    
1151
    SubPicture *sp, *sp2;
1152

    
1153
    if (is->video_st) {
1154
        if (is->pictq_size == 0) {
1155
            fprintf(stderr, "Internal error detected in the SDL timer\n");
1156
        } else {
1157
            /* dequeue the picture */
1158
            vp = &is->pictq[is->pictq_rindex];
1159

    
1160
            /* update current video pts */
1161
            is->video_current_pts = vp->pts;
1162
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1163
            is->video_current_pos = vp->pos;
1164

    
1165
            if(is->subtitle_st) {
1166
                if (is->subtitle_stream_changed) {
1167
                    SDL_LockMutex(is->subpq_mutex);
1168

    
1169
                    while (is->subpq_size) {
1170
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1171

    
1172
                        /* update queue size and signal for next picture */
1173
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1174
                            is->subpq_rindex = 0;
1175

    
1176
                        is->subpq_size--;
1177
                    }
1178
                    is->subtitle_stream_changed = 0;
1179

    
1180
                    SDL_CondSignal(is->subpq_cond);
1181
                    SDL_UnlockMutex(is->subpq_mutex);
1182
                } else {
1183
                    if (is->subpq_size > 0) {
1184
                        sp = &is->subpq[is->subpq_rindex];
1185

    
1186
                        if (is->subpq_size > 1)
1187
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1188
                        else
1189
                            sp2 = NULL;
1190

    
1191
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1192
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1193
                        {
1194
                            free_subpicture(sp);
1195

    
1196
                            /* update queue size and signal for next picture */
1197
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1198
                                is->subpq_rindex = 0;
1199

    
1200
                            SDL_LockMutex(is->subpq_mutex);
1201
                            is->subpq_size--;
1202
                            SDL_CondSignal(is->subpq_cond);
1203
                            SDL_UnlockMutex(is->subpq_mutex);
1204
                        }
1205
                    }
1206
                }
1207
            }
1208

    
1209
            /* display picture */
1210
            video_display(is);
1211

    
1212
            /* update queue size and signal for next picture */
1213
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1214
                is->pictq_rindex = 0;
1215

    
1216
            SDL_LockMutex(is->pictq_mutex);
1217
            vp->timer_id= 0;
1218
            is->pictq_size--;
1219
            SDL_CondSignal(is->pictq_cond);
1220
            SDL_UnlockMutex(is->pictq_mutex);
1221
        }
1222
    } else if (is->audio_st) {
1223
        /* draw the next audio frame */
1224

    
1225
        schedule_refresh(is, 40);
1226

    
1227
        /* if only audio stream, then display the audio bars (better
1228
           than nothing, just to test the implementation */
1229

    
1230
        /* display picture */
1231
        video_display(is);
1232
    } else {
1233
        schedule_refresh(is, 100);
1234
    }
1235
    if (show_status) {
1236
        static int64_t last_time;
1237
        int64_t cur_time;
1238
        int aqsize, vqsize, sqsize;
1239
        double av_diff;
1240

    
1241
        cur_time = av_gettime();
1242
        if (!last_time || (cur_time - last_time) >= 30000) {
1243
            aqsize = 0;
1244
            vqsize = 0;
1245
            sqsize = 0;
1246
            if (is->audio_st)
1247
                aqsize = is->audioq.size;
1248
            if (is->video_st)
1249
                vqsize = is->videoq.size;
1250
            if (is->subtitle_st)
1251
                sqsize = is->subtitleq.size;
1252
            av_diff = 0;
1253
            if (is->audio_st && is->video_st)
1254
                av_diff = get_audio_clock(is) - get_video_clock(is);
1255
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
1256
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1257
            fflush(stdout);
1258
            last_time = cur_time;
1259
        }
1260
    }
1261
}
1262

    
1263
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before (re)allocating at the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* with filters, the display geometry comes from the graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake queue_picture(), which blocks on 'allocated' */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1298

    
1299
/**
 * Convert a decoded frame into the next free SDL YUV overlay and enqueue
 * it for display, blocking while the picture queue is full or while the
 * main thread (re)allocates the overlay.  Returns 0 on success, -1 when
 * the video queue is aborting.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        /* take ownership of the filter graph's picture reference */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores planes as Y, V, U, so U and V
           (indices 1 and 2) are swapped relative to AVPicture order */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* scale/convert the decoded frame into the overlay planes */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        //We must schedule in a mutex as we must store the timer id before the timer dies or might end up freeing a alraedy freed id
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1416

    
1417
/**
1418
 * compute the exact PTS for the picture if it is omitted in the stream
1419
 * @param pts1 the dts of the pkt / pts of the frame
1420
 */
1421
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1422
{
1423
    double frame_delay, pts;
1424

    
1425
    pts = pts1;
1426

    
1427
    if (pts != 0) {
1428
        /* update video clock with pts, if present */
1429
        is->video_clock = pts;
1430
    } else {
1431
        pts = is->video_clock;
1432
    }
1433
    /* update video clock for next frame */
1434
    frame_delay = av_q2d(is->video_st->codec->time_base);
1435
    /* for MPEG2, the frame can be repeated, so we update the
1436
       clock accordingly */
1437
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1438
    is->video_clock += frame_delay;
1439

    
1440
#if defined(DEBUG_SYNC) && 0
1441
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1442
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1443
#endif
1444
    return queue_picture(is, src_frame, pts, pos);
1445
}
1446

    
1447
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a complete frame was produced (with *pts filled in,
 * in stream time_base units), 0 when no frame is available yet or a
 * flush packet was handled, -1 when the queue is aborting. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* a flush packet means a seek happened: reset decoder and timing */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                if(is->pictq[i].timer_id){
                    SDL_RemoveTimer(is->pictq[i].timer_id);
                    is->pictq[i].timer_id=0;
                    schedule_refresh(is, 1);
                }
            }
            /* wait until the refresh handler drains the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset pts/dts monotonicity tracking and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;

            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotonic dts/pts occurrences; used below to pick
               the more trustworthy timestamp source */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* choose the reordered pts when forced (decoder_reorder_pts==1),
           when it has fewer faults than dts in auto mode, or when no dts
           exists; otherwise fall back to the packet dts, then to 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

        /* NOTE(review): len1 (decoder return value) is currently unused —
           error handling was commented out below */
//            if (len1 < 0)
//                break;
    if (got_picture)
        return 1;
    return 0;
}
1515

    
1516
#if CONFIG_AVFILTER
1517
/* Private context of ffplay's source filter: bridges decoded frames from
 * the VideoState into the libavfilter graph. */
typedef struct {
    VideoState *is;     /* owning player state (the init() opaque argument) */
    AVFrame *frame;     /* scratch frame filled by get_video_frame() */
} FilterPriv;
1521

    
1522
/* Initialize the source filter: store the VideoState passed as 'opaque'
 * and allocate the scratch decode frame.
 * Returns 0 on success, -1 on missing opaque or allocation failure.
 * Fix: the avcodec_alloc_frame() result was not checked; a NULL frame
 * would be dereferenced later by the request_frame callback. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;

    if (!opaque)
        return -1;

    priv->is    = opaque;
    priv->frame = avcodec_alloc_frame();
    if (!priv->frame)
        return -1;          /* out of memory */

    return 0;
}
1532

    
1533
/* Tear down the source filter: release the scratch decode frame. */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;

    av_free(fp->frame);
}
1538

    
1539
/* request_frame callback of the source filter: decode until a full video
 * frame is available, copy it into a new filter picture buffer, and push
 * it through the link.  Returns 0 on success, -1 on decode abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop over packets that yielded no frame (ret==0), freeing each */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* emit the frame as a single full-height slice */
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}
1568

    
1569
/* Advertise exactly one pixel format to the graph: the decoder's own,
 * so no conversion happens at the source. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;
    enum PixelFormat fmts[] = {
        fp->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1579

    
1580
/* Configure the output link of the source filter from the decoder's
 * picture dimensions. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *fp        = link->src->priv;
    AVCodecContext *codec = fp->is->video_st->codec;

    link->w = codec->width;
    link->h = codec->height;

    return 0;
}
1590

    
1591
/* Source filter definition: no inputs, one video output that pulls
 * decoded frames from ffplay's video decoder. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1609

    
1610
/* Sink end_frame callback: intentionally empty — the frame is retrieved
 * by get_filtered_video_frame() pulling on the link instead. */
static void output_end_frame(AVFilterLink *link)
{
}
1613

    
1614
/* The sink accepts only YUV420P, matching the SDL overlay format. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat accepted[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(accepted));
    return 0;
}
1621

    
1622
/* Pull one picture out of the filter graph's sink.  On success the
 * picture reference is handed to the caller via frame->opaque (caller
 * must unref it), the data/linesize pointers are mirrored into 'frame',
 * and *pts is filled in.  Returns 1 on success, -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* detach the picture from the link; ownership moves to the caller */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1641

    
1642
/* Sink filter definition: one video input, no outputs; frames are pulled
 * out via get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1655
#endif  /* CONFIG_AVFILTER */
1656

    
1657
/* Video decoding thread: builds the optional filter graph, then loops
 * decoding frames (directly, or through the graph) and queueing them for
 * display.  Returns 0 when the stream ends or aborts. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    /* instantiate the ffplay source and sink filters */
    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user supplied a filter description: parse it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        /* NOTE(review): on parse failure, 'inputs'/'outputs' appear to
           leak here — verify avfilter_graph_parse ownership semantics */
        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: wire the source straight to the sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* busy-wait (10 ms steps) while paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        if (!ret)
            continue;       /* no frame yet; keep decoding */

        /* convert stream time_base units to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in single-step mode, pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1747

    
1748
/* Subtitle decoding thread: pulls packets from the subtitle queue,
 * decodes them, converts each rect's RGBA palette to YUVA, and enqueues
 * the subpicture for the refresh handler to display. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet after a seek: reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap subtitle with a palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette entries to CCIR YUVA
               in place, ready for blending onto the YUV picture */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1822

    
1823
/* copy samples for viewing in editor window */
1824
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1825
{
1826
    int size, len, channels;
1827

    
1828
    channels = is->audio_st->codec->channels;
1829

    
1830
    size = samples_size / sizeof(short);
1831
    while (size > 0) {
1832
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1833
        if (len > size)
1834
            len = size;
1835
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1836
        samples += len;
1837
        is->sample_array_index += len;
1838
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1839
            is->sample_array_index = 0;
1840
        size -= len;
1841
    }
1842
}
1843

    
1844
/* return the new audio buffer size (samples can be added or deleted
1845
   to get better sync if video or external master clock) */
1846
static int synchronize_audio(VideoState *is, short *samples,
1847
                             int samples_size1, double pts)
1848
{
1849
    int n, samples_size;
1850
    double ref_clock;
1851

    
1852
    n = 2 * is->audio_st->codec->channels;
1853
    samples_size = samples_size1;
1854

    
1855
    /* if not master, then we try to remove or add samples to correct the clock */
1856
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1857
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1858
        double diff, avg_diff;
1859
        int wanted_size, min_size, max_size, nb_samples;
1860

    
1861
        ref_clock = get_master_clock(is);
1862
        diff = get_audio_clock(is) - ref_clock;
1863

    
1864
        if (diff < AV_NOSYNC_THRESHOLD) {
1865
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1866
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1867
                /* not enough measures to have a correct estimate */
1868
                is->audio_diff_avg_count++;
1869
            } else {
1870
                /* estimate the A-V difference */
1871
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1872

    
1873
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1874
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1875
                    nb_samples = samples_size / n;
1876

    
1877
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1878
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1879
                    if (wanted_size < min_size)
1880
                        wanted_size = min_size;
1881
                    else if (wanted_size > max_size)
1882
                        wanted_size = max_size;
1883

    
1884
                    /* add or remove samples to correction the synchro */
1885
                    if (wanted_size < samples_size) {
1886
                        /* remove samples */
1887
                        samples_size = wanted_size;
1888
                    } else if (wanted_size > samples_size) {
1889
                        uint8_t *samples_end, *q;
1890
                        int nb;
1891

    
1892
                        /* add samples */
1893
                        nb = (samples_size - wanted_size);
1894
                        samples_end = (uint8_t *)samples + samples_size - n;
1895
                        q = samples_end + n;
1896
                        while (nb > 0) {
1897
                            memcpy(q, samples_end, n);
1898
                            q += n;
1899
                            nb -= n;
1900
                        }
1901
                        samples_size = wanted_size;
1902
                    }
1903
                }
1904
#if 0
1905
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1906
                       diff, avg_diff, samples_size - samples_size1,
1907
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1908
#endif
1909
            }
1910
        } else {
1911
            /* too big difference : may be initial PTS errors, so
1912
               reset A-V filter */
1913
            is->audio_diff_avg_count = 0;
1914
            is->audio_diff_cum = 0;
1915
        }
1916
    }
1917

    
1918
    return samples_size;
1919
}
1920

    
1921
/* Decode one audio frame and return its uncompressed size in bytes.
   The decoded (and, if needed, s16-converted) data is left in
   is->audio_buf; *pts_ptr receives the presentation time of the frame.
   Returns -1 when paused or when the audio queue is aborting.
   Runs on the SDL audio thread (called from sdl_audio_callback). */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* sliding window over pkt */
    AVPacket *pkt = &is->audio_pkt;            /* packet currently owned */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter whenever the decoder
               output format differs from the s16 format SDL plays */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (interleaved s16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet (blocking) */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2023

    
2024
/* get the current audio output buffer size, in samples. With SDL, we
2025
   cannot have a precise information */
2026
static int audio_write_get_buf_size(VideoState *is)
2027
{
2028
    return is->audio_buf_size - is->audio_buf_index;
2029
}
2030

    
2031

    
2032
/* Prepare a new audio buffer: SDL audio-thread callback that must fill
   exactly `len` bytes of `stream` with playable s16 audio.  Decodes
   frames on demand via audio_decode_frame() and outputs silence on
   decode failure so playback never stalls. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when SDL asked for data; used by the audio clock */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill audio_buf once the previous contents are consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the buffer to correct A-V drift */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits into the remaining SDL buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2067

    
2068
/* Open a given stream (audio, video or subtitle) of is->ic by index:
   find and configure the decoder from the global option variables,
   open the SDL audio device for audio streams, and start the matching
   decoding thread / packet queue. Return 0 if OK, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: downmix to at most 2 channels for SDL */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the command-line decoding options to the codec context */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output: open the SDL device with the stream's rate */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);  /* start the SDL audio callback */
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2167

    
2168
/* Close one stream component: abort its packet queue, wake and join the
   decoding thread (for video/subtitle), release the SDL audio device or
   conversion context (for audio), then close the codec and clear the
   VideoState fields. Safe to call with an out-of-range index. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio callback thread */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* drop any further packets for this stream during demuxing */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2239

    
2240
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* I/O interrupt callback (installed via url_set_interrupt_cb): returns
   non-zero once the user has requested shutdown, so blocking network or
   disk operations abort promptly */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2248

    
2249
/* This thread gets the stream from the disk or the network: it opens
   the input, selects and opens the best audio/video/subtitle streams,
   then loops reading packets and dispatching them to the per-stream
   queues, servicing pause and seek requests along the way.  On exit it
   closes every component and, on error, posts FF_QUIT_EVENT to the GUI. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];          /* chosen stream per media type, -1 if none */
    int st_count[CODEC_TYPE_NB]={0};      /* how many streams of each type seen */
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the wanted stream (or the one with the most
       packets seen during probing) */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    if(ret<0) {
        /* no video: add the refresh timer to draw the picture */
        schedule_refresh(is, 40);

        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demuxing loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush every queue and push a flush packet so the
                   decoders reset their internal state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* push an empty packet so the video decoder drains */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* dispatch the packet to the queue of its stream */
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2500

    
2501
/* Allocate and initialize a VideoState for `filename`, create the
   display/subtitle synchronization primitives and launch the demuxing
   thread.  Returns the new state, or NULL on allocation/thread failure. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is = av_mallocz(sizeof(VideoState));

    if (!is)
        return NULL;

    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop    = 0;
    is->xleft   = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond  = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond  = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid    = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* thread creation failed: release the state and report failure */
        av_free(is);
        is = NULL;
    }
    return is;
}
2528

    
2529
/* Shut down a stream opened with stream_open(): signal the demuxing
   thread to abort, join it, free all queued pictures and the SDL
   synchronization primitives, then free the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* without avfilter the scaler context is owned here */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2561

    
2562
/* Switch to the next stream of the given media type, wrapping around.
   For subtitles, cycling past the last stream turns subtitles off
   (stream_index -1); for audio/video the search wraps to stream 0.
   Closes the current component and opens the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                /* wrapped: disable subtitles instead of restarting */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came full circle without finding another usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2610

    
2611

    
2612
static void toggle_full_screen(void)
2613
{
2614
    is_full_screen = !is_full_screen;
2615
    if (!fs_screen_width) {
2616
        /* use default SDL method */
2617
//        SDL_WM_ToggleFullScreen(screen);
2618
    }
2619
    video_open(cur_stream);
2620
}
2621

    
2622
static void toggle_pause(void)
2623
{
2624
    if (cur_stream)
2625
        stream_pause(cur_stream);
2626
    step = 0;
2627
}
2628

    
2629
static void step_to_next_frame(void)
2630
{
2631
    if (cur_stream) {
2632
        /* if the stream is paused unpause it, then step */
2633
        if (cur_stream->paused)
2634
            stream_pause(cur_stream);
2635
    }
2636
    step = 1;
2637
}
2638

    
2639
static void do_exit(void)
2640
{
2641
    int i;
2642
    if (cur_stream) {
2643
        stream_close(cur_stream);
2644
        cur_stream = NULL;
2645
    }
2646
    for (i = 0; i < CODEC_TYPE_NB; i++)
2647
        av_free(avcodec_opts[i]);
2648
    av_free(avformat_opts);
2649
    av_free(sws_opts);
2650
#if CONFIG_AVFILTER
2651
    avfilter_uninit();
2652
#endif
2653
    if (show_status)
2654
        printf("\n");
2655
    SDL_Quit();
2656
    exit(0);
2657
}
2658

    
2659
static void toggle_audio_display(void)
2660
{
2661
    if (cur_stream) {
2662
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2663
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2664
        fill_rectangle(screen,
2665
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2666
                    bgcolor);
2667
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2668
    }
2669
}
2670

    
2671
/* handle an event sent by the GUI */
/* Main interactive loop: blocks on SDL_WaitEvent() and dispatches
 * keyboard, mouse, resize, quit and the FF_* user events posted by the
 * decoder/display threads.  Never returns except via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; incr is in seconds (time mode)
             * and later scaled to bytes when seeking by bytes */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* pick the best-known current byte position:
                         * video position, then audio packet, then the
                         * raw I/O position as a last resort */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert seconds to bytes via the stream bitrate,
                         * falling back to an assumed 1.44 Mbit/s */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        /* click or drag: seek to the fraction of the window width */
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* ignore motion unless a button is held down */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* total duration broken into h:m:s for the message */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        /* posted by the video thread: (re)allocate the picture surface */
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        /* posted by the refresh timer: display the next frame */
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
2808

    
2809
static void opt_frame_size(const char *arg)
2810
{
2811
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2812
        fprintf(stderr, "Incorrect frame size\n");
2813
        exit(1);
2814
    }
2815
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2816
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2817
        exit(1);
2818
    }
2819
}
2820

    
2821
static int opt_width(const char *opt, const char *arg)
2822
{
2823
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2824
    return 0;
2825
}
2826

    
2827
static int opt_height(const char *opt, const char *arg)
2828
{
2829
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2830
    return 0;
2831
}
2832

    
2833
static void opt_format(const char *arg)
2834
{
2835
    file_iformat = av_find_input_format(arg);
2836
    if (!file_iformat) {
2837
        fprintf(stderr, "Unknown input format: %s\n", arg);
2838
        exit(1);
2839
    }
2840
}
2841

    
2842
static void opt_frame_pix_fmt(const char *arg)
2843
{
2844
    frame_pix_fmt = av_get_pix_fmt(arg);
2845
}
2846

    
2847
static int opt_sync(const char *opt, const char *arg)
2848
{
2849
    if (!strcmp(arg, "audio"))
2850
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2851
    else if (!strcmp(arg, "video"))
2852
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2853
    else if (!strcmp(arg, "ext"))
2854
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2855
    else {
2856
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2857
        exit(1);
2858
    }
2859
    return 0;
2860
}
2861

    
2862
static int opt_seek(const char *opt, const char *arg)
2863
{
2864
    start_time = parse_time_or_die(opt, arg, 1);
2865
    return 0;
2866
}
2867

    
2868
static int opt_debug(const char *opt, const char *arg)
2869
{
2870
    av_log_set_level(99);
2871
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2872
    return 0;
2873
}
2874

    
2875
static int opt_vismv(const char *opt, const char *arg)
2876
{
2877
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2878
    return 0;
2879
}
2880

    
2881
static int opt_thread_count(const char *opt, const char *arg)
2882
{
2883
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2884
#if !HAVE_THREADS
2885
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2886
#endif
2887
    return 0;
2888
}
2889

    
2890
/* Command-line option table consumed by parse_options()/show_help_options().
 * Fields per entry: name, flags, handler-or-variable, help text, arg name.
 * OPT_FUNC2 entries dispatch to the opt_* callbacks above; OPT_BOOL/OPT_INT
 * entries write straight into the referenced global.  The list must stay
 * NULL-terminated. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2929

    
2930
/* Print the one-line program description and invocation synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
2936

    
2937
static void show_help(void)
2938
{
2939
    show_usage();
2940
    show_help_options(options, "Main options:\n",
2941
                      OPT_EXPERT, 0);
2942
    show_help_options(options, "\nAdvanced options:\n",
2943
                      OPT_EXPERT, OPT_EXPERT);
2944
    printf("\nWhile playing:\n"
2945
           "q, ESC              quit\n"
2946
           "f                   toggle full screen\n"
2947
           "p, SPC              pause\n"
2948
           "a                   cycle audio channel\n"
2949
           "v                   cycle video channel\n"
2950
           "t                   cycle subtitle channel\n"
2951
           "w                   show audio waves\n"
2952
           "left/right          seek backward/forward 10 seconds\n"
2953
           "down/up             seek backward/forward 1 minute\n"
2954
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2955
           );
2956
}
2957

    
2958
static void opt_input_file(const char *filename)
2959
{
2960
    if (input_filename) {
2961
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2962
                filename, input_filename);
2963
        exit(1);
2964
    }
2965
    if (!strcmp(filename, "-"))
2966
        filename = "pipe:";
2967
    input_filename = filename;
2968
}
2969

    
2970
/* Called from the main */
/* Program entry point: register libav* components, parse options,
 * initialize SDL, open the stream and hand control to event_loop(). */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* per-media-type codec contexts used to collect -default options */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    /* dummy 16x16 context only used as a holder for scaler options */
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    /* -nodisp implies no video decoding at all */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    /* remember the desktop size for full-screen mode, when SDL can report it */
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* drop event types the player never consumes */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues to signal a flush after seeks */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}