ffmpeg / ffplay.c @ 3966a574

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <math.h>
24
#include <limits.h>
25
#include "libavutil/avstring.h"
26
#include "libavutil/pixdesc.h"
27
#include "libavformat/avformat.h"
28
#include "libavdevice/avdevice.h"
29
#include "libswscale/swscale.h"
30
#include "libavcodec/audioconvert.h"
31
#include "libavcodec/colorspace.h"
32
#include "libavcodec/opt.h"
33
#include "libavcodec/dsputil.h"
34

    
35
#if CONFIG_AVFILTER
36
# include "libavfilter/avfilter.h"
37
# include "libavfilter/avfiltergraph.h"
38
# include "libavfilter/graphparser.h"
39
#endif
40

    
41
#include "cmdutils.h"
42

    
43
#include <SDL.h>
44
#include <SDL_thread.h>
45

    
46
#ifdef __MINGW32__
47
#undef main /* We don't want SDL to override our main() */
48
#endif
49

    
50
#undef exit
51
#undef printf
52
#undef fprintf
53

    
54
const char program_name[] = "FFplay";
55
const int program_birth_year = 2003;
56

    
57
//#define DEBUG_SYNC
58

    
59
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61
#define MIN_FRAMES 5
62

    
63
/* SDL audio buffer size, in samples. Should be small to have precise
64
   A/V sync as SDL does not have hardware buffer fullness info. */
65
#define SDL_AUDIO_BUFFER_SIZE 1024
66

    
67
/* no AV sync correction is done if below the AV sync threshold */
68
#define AV_SYNC_THRESHOLD 0.01
69
/* no AV correction is done if too big error */
70
#define AV_NOSYNC_THRESHOLD 10.0
71

    
72
/* maximum audio speed change to get correct sync */
73
#define SAMPLE_CORRECTION_PERCENT_MAX 10
74

    
75
/* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
76
#define AUDIO_DIFF_AVG_NB   20
77

    
78
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
79
#define SAMPLE_ARRAY_SIZE (2*65536)
80

    
81
#if !CONFIG_AVFILTER
82
static int sws_flags = SWS_BICUBIC;
83
#endif
84

    
85
typedef struct PacketQueue {
86
    AVPacketList *first_pkt, *last_pkt;
87
    int nb_packets;
88
    int size;
89
    int abort_request;
90
    SDL_mutex *mutex;
91
    SDL_cond *cond;
92
} PacketQueue;
93

    
94
#define VIDEO_PICTURE_QUEUE_SIZE 1
95
#define SUBPICTURE_QUEUE_SIZE 4
96

    
97
typedef struct VideoPicture {
98
    double pts;                                  ///<presentation time stamp for this picture
99
    int64_t pos;                                 ///<byte position in file
100
    SDL_Overlay *bmp;
101
    int width, height; /* source height & width */
102
    int allocated;
103
    SDL_TimerID timer_id;
104
    enum PixelFormat pix_fmt;
105

    
106
#if CONFIG_AVFILTER
107
    AVFilterPicRef *picref;
108
#endif
109
} VideoPicture;
110

    
111
typedef struct SubPicture {
112
    double pts; /* presentation time stamp for this picture */
113
    AVSubtitle sub;
114
} SubPicture;
115

    
116
enum {
117
    AV_SYNC_AUDIO_MASTER, /* default choice */
118
    AV_SYNC_VIDEO_MASTER,
119
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
120
};
121

    
122
typedef struct VideoState {
123
    SDL_Thread *parse_tid;
124
    SDL_Thread *video_tid;
125
    AVInputFormat *iformat;
126
    int no_background;
127
    int abort_request;
128
    int paused;
129
    int last_paused;
130
    int seek_req;
131
    int seek_flags;
132
    int64_t seek_pos;
133
    int64_t seek_rel;
134
    int read_pause_return;
135
    AVFormatContext *ic;
136
    int dtg_active_format;
137

    
138
    int audio_stream;
139

    
140
    int av_sync_type;
141
    double external_clock; /* external clock base */
142
    int64_t external_clock_time;
143

    
144
    double audio_clock;
145
    double audio_diff_cum; /* used for AV difference average computation */
146
    double audio_diff_avg_coef;
147
    double audio_diff_threshold;
148
    int audio_diff_avg_count;
149
    AVStream *audio_st;
150
    PacketQueue audioq;
151
    int audio_hw_buf_size;
152
    /* samples output by the codec. we reserve more space for avsync
153
       compensation */
154
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
155
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
156
    uint8_t *audio_buf;
157
    unsigned int audio_buf_size; /* in bytes */
158
    int audio_buf_index; /* in bytes */
159
    AVPacket audio_pkt_temp;
160
    AVPacket audio_pkt;
161
    enum SampleFormat audio_src_fmt;
162
    AVAudioConvert *reformat_ctx;
163

    
164
    int show_audio; /* if true, display audio samples */
165
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
166
    int sample_array_index;
167
    int last_i_start;
168
    RDFTContext rdft;
169
    int rdft_bits;
170
    int xpos;
171

    
172
    SDL_Thread *subtitle_tid;
173
    int subtitle_stream;
174
    int subtitle_stream_changed;
175
    AVStream *subtitle_st;
176
    PacketQueue subtitleq;
177
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
178
    int subpq_size, subpq_rindex, subpq_windex;
179
    SDL_mutex *subpq_mutex;
180
    SDL_cond *subpq_cond;
181

    
182
    double frame_timer;
183
    double frame_last_pts;
184
    double frame_last_delay;
185
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
186
    int video_stream;
187
    AVStream *video_st;
188
    PacketQueue videoq;
189
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
190
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
191
    int64_t video_current_pos;                   ///<current displayed file pos
192
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
193
    int pictq_size, pictq_rindex, pictq_windex;
194
    SDL_mutex *pictq_mutex;
195
    SDL_cond *pictq_cond;
196
#if !CONFIG_AVFILTER
197
    struct SwsContext *img_convert_ctx;
198
#endif
199

    
200
    //    QETimer *video_timer;
201
    char filename[1024];
202
    int width, height, xleft, ytop;
203

    
204
    int64_t faulty_pts;
205
    int64_t faulty_dts;
206
    int64_t last_dts_for_fault_detection;
207
    int64_t last_pts_for_fault_detection;
208

    
209
#if CONFIG_AVFILTER
210
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
211
#endif
212
} VideoState;
213

    
214
static void show_help(void);
215
static int audio_write_get_buf_size(VideoState *is);
216

    
217
/* options specified by the user */
218
static AVInputFormat *file_iformat;
219
static const char *input_filename;
220
static int fs_screen_width;
221
static int fs_screen_height;
222
static int screen_width = 0;
223
static int screen_height = 0;
224
static int frame_width = 0;
225
static int frame_height = 0;
226
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
227
static int audio_disable;
228
static int video_disable;
229
static int wanted_stream[CODEC_TYPE_NB]={
230
    [CODEC_TYPE_AUDIO]=-1,
231
    [CODEC_TYPE_VIDEO]=-1,
232
    [CODEC_TYPE_SUBTITLE]=-1,
233
};
234
static int seek_by_bytes=-1;
235
static int display_disable;
236
static int show_status = 1;
237
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
238
static int64_t start_time = AV_NOPTS_VALUE;
239
static int debug = 0;
240
static int debug_mv = 0;
241
static int step = 0;
242
static int thread_count = 1;
243
static int workaround_bugs = 1;
244
static int fast = 0;
245
static int genpts = 0;
246
static int lowres = 0;
247
static int idct = FF_IDCT_AUTO;
248
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
249
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
250
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
251
static int error_recognition = FF_ER_CAREFUL;
252
static int error_concealment = 3;
253
static int decoder_reorder_pts= -1;
254
static int autoexit;
255
#if CONFIG_AVFILTER
256
static char *vfilters = NULL;
257
#endif
258

    
259
/* current context */
260
static int is_full_screen;
261
static VideoState *cur_stream;
262
static int64_t audio_callback_time;
263

    
264
static AVPacket flush_pkt;
265

    
266
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
267
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
268
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
269

    
270
static SDL_Surface *screen;
271

    
272
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
273

    
274
/* packet queue handling */
275
static void packet_queue_init(PacketQueue *q)
276
{
277
    memset(q, 0, sizeof(PacketQueue));
278
    q->mutex = SDL_CreateMutex();
279
    q->cond = SDL_CreateCond();
280
    packet_queue_put(q, &flush_pkt);
281
}
282

    
283
static void packet_queue_flush(PacketQueue *q)
284
{
285
    AVPacketList *pkt, *pkt1;
286

    
287
    SDL_LockMutex(q->mutex);
288
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
289
        pkt1 = pkt->next;
290
        av_free_packet(&pkt->pkt);
291
        av_freep(&pkt);
292
    }
293
    q->last_pkt = NULL;
294
    q->first_pkt = NULL;
295
    q->nb_packets = 0;
296
    q->size = 0;
297
    SDL_UnlockMutex(q->mutex);
298
}
299

    
300
static void packet_queue_end(PacketQueue *q)
301
{
302
    packet_queue_flush(q);
303
    SDL_DestroyMutex(q->mutex);
304
    SDL_DestroyCond(q->cond);
305
}
306

    
307
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
308
{
309
    AVPacketList *pkt1;
310

    
311
    /* duplicate the packet */
312
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
313
        return -1;
314

    
315
    pkt1 = av_malloc(sizeof(AVPacketList));
316
    if (!pkt1)
317
        return -1;
318
    pkt1->pkt = *pkt;
319
    pkt1->next = NULL;
320

    
321

    
322
    SDL_LockMutex(q->mutex);
323

    
324
    if (!q->last_pkt)
325

    
326
        q->first_pkt = pkt1;
327
    else
328
        q->last_pkt->next = pkt1;
329
    q->last_pkt = pkt1;
330
    q->nb_packets++;
331
    q->size += pkt1->pkt.size + sizeof(*pkt1);
332
    /* XXX: should duplicate packet data in DV case */
333
    SDL_CondSignal(q->cond);
334

    
335
    SDL_UnlockMutex(q->mutex);
336
    return 0;
337
}
338

    
339
static void packet_queue_abort(PacketQueue *q)
340
{
341
    SDL_LockMutex(q->mutex);
342

    
343
    q->abort_request = 1;
344

    
345
    SDL_CondSignal(q->cond);
346

    
347
    SDL_UnlockMutex(q->mutex);
348
}
349

    
350
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
351
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
352
{
353
    AVPacketList *pkt1;
354
    int ret;
355

    
356
    SDL_LockMutex(q->mutex);
357

    
358
    for(;;) {
359
        if (q->abort_request) {
360
            ret = -1;
361
            break;
362
        }
363

    
364
        pkt1 = q->first_pkt;
365
        if (pkt1) {
366
            q->first_pkt = pkt1->next;
367
            if (!q->first_pkt)
368
                q->last_pkt = NULL;
369
            q->nb_packets--;
370
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
371
            *pkt = pkt1->pkt;
372
            av_free(pkt1);
373
            ret = 1;
374
            break;
375
        } else if (!block) {
376
            ret = 0;
377
            break;
378
        } else {
379
            SDL_CondWait(q->cond, q->mutex);
380
        }
381
    }
382
    SDL_UnlockMutex(q->mutex);
383
    return ret;
384
}
385

    
386
static inline void fill_rectangle(SDL_Surface *screen,
387
                                  int x, int y, int w, int h, int color)
388
{
389
    SDL_Rect rect;
390
    rect.x = x;
391
    rect.y = y;
392
    rect.w = w;
393
    rect.h = h;
394
    SDL_FillRect(screen, &rect, color);
395
}
396

    
397
#if 0
398
/* draw only the border of a rectangle */
399
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
400
{
401
    int w1, w2, h1, h2;
402

403
    /* fill the background */
404
    w1 = x;
405
    if (w1 < 0)
406
        w1 = 0;
407
    w2 = s->width - (x + w);
408
    if (w2 < 0)
409
        w2 = 0;
410
    h1 = y;
411
    if (h1 < 0)
412
        h1 = 0;
413
    h2 = s->height - (y + h);
414
    if (h2 < 0)
415
        h2 = 0;
416
    fill_rectangle(screen,
417
                   s->xleft, s->ytop,
418
                   w1, s->height,
419
                   color);
420
    fill_rectangle(screen,
421
                   s->xleft + s->width - w2, s->ytop,
422
                   w2, s->height,
423
                   color);
424
    fill_rectangle(screen,
425
                   s->xleft + w1, s->ytop,
426
                   s->width - w1 - w2, h1,
427
                   color);
428
    fill_rectangle(screen,
429
                   s->xleft + w1, s->ytop + s->height - h2,
430
                   s->width - w1 - w2, h2,
431
                   color);
432
}
433
#endif
434

    
435
#define ALPHA_BLEND(a, oldp, newp, s)\
436
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
437

    
438
#define RGBA_IN(r, g, b, a, s)\
439
{\
440
    unsigned int v = ((const uint32_t *)(s))[0];\
441
    a = (v >> 24) & 0xff;\
442
    r = (v >> 16) & 0xff;\
443
    g = (v >> 8) & 0xff;\
444
    b = v & 0xff;\
445
}
446

    
447
#define YUVA_IN(y, u, v, a, s, pal)\
448
{\
449
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
450
    a = (val >> 24) & 0xff;\
451
    y = (val >> 16) & 0xff;\
452
    u = (val >> 8) & 0xff;\
453
    v = val & 0xff;\
454
}
455

    
456
#define YUVA_OUT(d, y, u, v, a)\
457
{\
458
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
459
}
460

    
461

    
462
#define BPP 1
463

    
464
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
465
{
466
    int wrap, wrap3, width2, skip2;
467
    int y, u, v, a, u1, v1, a1, w, h;
468
    uint8_t *lum, *cb, *cr;
469
    const uint8_t *p;
470
    const uint32_t *pal;
471
    int dstx, dsty, dstw, dsth;
472

    
473
    dstw = av_clip(rect->w, 0, imgw);
474
    dsth = av_clip(rect->h, 0, imgh);
475
    dstx = av_clip(rect->x, 0, imgw - dstw);
476
    dsty = av_clip(rect->y, 0, imgh - dsth);
477
    lum = dst->data[0] + dsty * dst->linesize[0];
478
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
479
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
480

    
481
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
482
    skip2 = dstx >> 1;
483
    wrap = dst->linesize[0];
484
    wrap3 = rect->pict.linesize[0];
485
    p = rect->pict.data[0];
486
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
487

    
488
    if (dsty & 1) {
489
        lum += dstx;
490
        cb += skip2;
491
        cr += skip2;
492

    
493
        if (dstx & 1) {
494
            YUVA_IN(y, u, v, a, p, pal);
495
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498
            cb++;
499
            cr++;
500
            lum++;
501
            p += BPP;
502
        }
503
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
504
            YUVA_IN(y, u, v, a, p, pal);
505
            u1 = u;
506
            v1 = v;
507
            a1 = a;
508
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509

    
510
            YUVA_IN(y, u, v, a, p + BPP, pal);
511
            u1 += u;
512
            v1 += v;
513
            a1 += a;
514
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
515
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
516
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
517
            cb++;
518
            cr++;
519
            p += 2 * BPP;
520
            lum += 2;
521
        }
522
        if (w) {
523
            YUVA_IN(y, u, v, a, p, pal);
524
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
526
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
527
            p++;
528
            lum++;
529
        }
530
        p += wrap3 - dstw * BPP;
531
        lum += wrap - dstw - dstx;
532
        cb += dst->linesize[1] - width2 - skip2;
533
        cr += dst->linesize[2] - width2 - skip2;
534
    }
535
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
536
        lum += dstx;
537
        cb += skip2;
538
        cr += skip2;
539

    
540
        if (dstx & 1) {
541
            YUVA_IN(y, u, v, a, p, pal);
542
            u1 = u;
543
            v1 = v;
544
            a1 = a;
545
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
546
            p += wrap3;
547
            lum += wrap;
548
            YUVA_IN(y, u, v, a, p, pal);
549
            u1 += u;
550
            v1 += v;
551
            a1 += a;
552
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
553
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
554
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
555
            cb++;
556
            cr++;
557
            p += -wrap3 + BPP;
558
            lum += -wrap + 1;
559
        }
560
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
561
            YUVA_IN(y, u, v, a, p, pal);
562
            u1 = u;
563
            v1 = v;
564
            a1 = a;
565
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
566

    
567
            YUVA_IN(y, u, v, a, p + BPP, pal);
568
            u1 += u;
569
            v1 += v;
570
            a1 += a;
571
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
572
            p += wrap3;
573
            lum += wrap;
574

    
575
            YUVA_IN(y, u, v, a, p, pal);
576
            u1 += u;
577
            v1 += v;
578
            a1 += a;
579
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
580

    
581
            YUVA_IN(y, u, v, a, p + BPP, pal);
582
            u1 += u;
583
            v1 += v;
584
            a1 += a;
585
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
586

    
587
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
588
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
589

    
590
            cb++;
591
            cr++;
592
            p += -wrap3 + 2 * BPP;
593
            lum += -wrap + 2;
594
        }
595
        if (w) {
596
            YUVA_IN(y, u, v, a, p, pal);
597
            u1 = u;
598
            v1 = v;
599
            a1 = a;
600
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601
            p += wrap3;
602
            lum += wrap;
603
            YUVA_IN(y, u, v, a, p, pal);
604
            u1 += u;
605
            v1 += v;
606
            a1 += a;
607
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
609
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
610
            cb++;
611
            cr++;
612
            p += -wrap3 + BPP;
613
            lum += -wrap + 1;
614
        }
615
        p += wrap3 + (wrap3 - dstw * BPP);
616
        lum += wrap + (wrap - dstw - dstx);
617
        cb += dst->linesize[1] - width2 - skip2;
618
        cr += dst->linesize[2] - width2 - skip2;
619
    }
620
    /* handle odd height */
621
    if (h) {
622
        lum += dstx;
623
        cb += skip2;
624
        cr += skip2;
625

    
626
        if (dstx & 1) {
627
            YUVA_IN(y, u, v, a, p, pal);
628
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
629
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
630
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
631
            cb++;
632
            cr++;
633
            lum++;
634
            p += BPP;
635
        }
636
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
637
            YUVA_IN(y, u, v, a, p, pal);
638
            u1 = u;
639
            v1 = v;
640
            a1 = a;
641
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
642

    
643
            YUVA_IN(y, u, v, a, p + BPP, pal);
644
            u1 += u;
645
            v1 += v;
646
            a1 += a;
647
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
648
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
649
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
650
            cb++;
651
            cr++;
652
            p += 2 * BPP;
653
            lum += 2;
654
        }
655
        if (w) {
656
            YUVA_IN(y, u, v, a, p, pal);
657
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
659
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
660
        }
661
    }
662
}
663

    
664
static void free_subpicture(SubPicture *sp)
665
{
666
    int i;
667

    
668
    for (i = 0; i < sp->sub.num_rects; i++)
669
    {
670
        av_freep(&sp->sub.rects[i]->pict.data[0]);
671
        av_freep(&sp->sub.rects[i]->pict.data[1]);
672
        av_freep(&sp->sub.rects[i]);
673
    }
674

    
675
    av_free(sp->sub.rects);
676

    
677
    memset(&sp->sub, 0, sizeof(AVSubtitle));
678
}
679

    
680
static void video_image_display(VideoState *is)
681
{
682
    VideoPicture *vp;
683
    SubPicture *sp;
684
    AVPicture pict;
685
    float aspect_ratio;
686
    int width, height, x, y;
687
    SDL_Rect rect;
688
    int i;
689

    
690
    vp = &is->pictq[is->pictq_rindex];
691
    if (vp->bmp) {
692
#if CONFIG_AVFILTER
693
         if (vp->picref->pixel_aspect.num == 0)
694
             aspect_ratio = 0;
695
         else
696
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
697
#else
698

    
699
        /* XXX: use variable in the frame */
700
        if (is->video_st->sample_aspect_ratio.num)
701
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
702
        else if (is->video_st->codec->sample_aspect_ratio.num)
703
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
704
        else
705
            aspect_ratio = 0;
706
#endif
707
        if (aspect_ratio <= 0.0)
708
            aspect_ratio = 1.0;
709
        aspect_ratio *= (float)vp->width / (float)vp->height;
710
        /* if an active format is indicated, then it overrides the
711
           mpeg format */
712
#if 0
713
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
714
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
715
            printf("dtg_active_format=%d\n", is->dtg_active_format);
716
        }
717
#endif
718
#if 0
719
        switch(is->video_st->codec->dtg_active_format) {
720
        case FF_DTG_AFD_SAME:
721
        default:
722
            /* nothing to do */
723
            break;
724
        case FF_DTG_AFD_4_3:
725
            aspect_ratio = 4.0 / 3.0;
726
            break;
727
        case FF_DTG_AFD_16_9:
728
            aspect_ratio = 16.0 / 9.0;
729
            break;
730
        case FF_DTG_AFD_14_9:
731
            aspect_ratio = 14.0 / 9.0;
732
            break;
733
        case FF_DTG_AFD_4_3_SP_14_9:
734
            aspect_ratio = 14.0 / 9.0;
735
            break;
736
        case FF_DTG_AFD_16_9_SP_14_9:
737
            aspect_ratio = 14.0 / 9.0;
738
            break;
739
        case FF_DTG_AFD_SP_4_3:
740
            aspect_ratio = 4.0 / 3.0;
741
            break;
742
        }
743
#endif
744

    
745
        if (is->subtitle_st)
746
        {
747
            if (is->subpq_size > 0)
748
            {
749
                sp = &is->subpq[is->subpq_rindex];
750

    
751
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
752
                {
753
                    SDL_LockYUVOverlay (vp->bmp);
754

    
755
                    pict.data[0] = vp->bmp->pixels[0];
756
                    pict.data[1] = vp->bmp->pixels[2];
757
                    pict.data[2] = vp->bmp->pixels[1];
758

    
759
                    pict.linesize[0] = vp->bmp->pitches[0];
760
                    pict.linesize[1] = vp->bmp->pitches[2];
761
                    pict.linesize[2] = vp->bmp->pitches[1];
762

    
763
                    for (i = 0; i < sp->sub.num_rects; i++)
764
                        blend_subrect(&pict, sp->sub.rects[i],
765
                                      vp->bmp->w, vp->bmp->h);
766

    
767
                    SDL_UnlockYUVOverlay (vp->bmp);
768
                }
769
            }
770
        }
771

    
772

    
773
        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
774
        height = is->height;
775
        width = ((int)rint(height * aspect_ratio)) & ~1;
776
        if (width > is->width) {
777
            width = is->width;
778
            height = ((int)rint(width / aspect_ratio)) & ~1;
779
        }
780
        x = (is->width - width) / 2;
781
        y = (is->height - height) / 2;
782
        if (!is->no_background) {
783
            /* fill the background */
784
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
785
        } else {
786
            is->no_background = 0;
787
        }
788
        rect.x = is->xleft + x;
789
        rect.y = is->ytop  + y;
790
        rect.w = width;
791
        rect.h = height;
792
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
793
    } else {
794
#if 0
795
        fill_rectangle(screen,
796
                       is->xleft, is->ytop, is->width, is->height,
797
                       QERGB(0x00, 0x00, 0x00));
798
#endif
799
    }
800
}
801

    
802
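/* a modulo b, with the result adjusted to lie in [0, b) even when a is negative;
   used to wrap indexes into the circular sample_array buffer */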
static inline int compute_mod(int a, int b)
803
{
804
    a = a % b;
805
    if (a >= 0)
806
        return a;
807
    else
808
        return a + b;
809
}
810

    
811
static void video_audio_display(VideoState *s)
812
{
813
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
814
    int ch, channels, h, h2, bgcolor, fgcolor;
815
    int64_t time_diff;
816
    int rdft_bits, nb_freq;
817

    
818
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
819
        ;
820
    nb_freq= 1<<(rdft_bits-1);
821

    
822
    /* compute display index : center on currently output samples */
823
    channels = s->audio_st->codec->channels;
824
    nb_display_channels = channels;
825
    if (!s->paused) {
826
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
827
        n = 2 * channels;
828
        delay = audio_write_get_buf_size(s);
829
        delay /= n;
830

    
831
        /* to be more precise, we take into account the time spent since
832
           the last buffer computation */
833
        if (audio_callback_time) {
834
            time_diff = av_gettime() - audio_callback_time;
835
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
836
        }
837

    
838
        delay -= data_used / 2;
839
        if (delay < data_used)
840
            delay = data_used;
841

    
842
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
843
        if(s->show_audio==1){
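            /* Oscilloscope-style trigger: within the last 1000 interleaved samples,
               pick a point where the waveform crosses zero ((b^c) < 0 means b and c
               have opposite signs) with the largest drop (a - d), so the displayed
               trace starts at a similar phase on every refresh instead of jittering. */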
844
            h= INT_MIN;
845
            for(i=0; i<1000; i+=channels){
846
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
847
                int a= s->sample_array[idx];
848
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
849
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
850
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
851
                int score= a-d;
852
                if(h<score && (b^c)<0){
853
                    h= score;
854
                    i_start= idx;
855
                }
856
            }
857
        }
858

    
859
        s->last_i_start = i_start;
860
    } else {
861
        i_start = s->last_i_start;
862
    }
863

    
864
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
865
    if(s->show_audio==1){
866
        fill_rectangle(screen,
867
                       s->xleft, s->ytop, s->width, s->height,
868
                       bgcolor);
869

    
870
        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
871

    
872
        /* total height for one channel */
873
        h = s->height / nb_display_channels;
874
        /* graph height / 2 */
875
        h2 = (h * 9) / 20;
876
        for(ch = 0;ch < nb_display_channels; ch++) {
877
            i = i_start + ch;
878
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
879
            for(x = 0; x < s->width; x++) {
880
                y = (s->sample_array[i] * h2) >> 15;
881
                if (y < 0) {
882
                    y = -y;
883
                    ys = y1 - y;
884
                } else {
885
                    ys = y1;
886
                }
887
                fill_rectangle(screen,
888
                               s->xleft + x, ys, 1, y,
889
                               fgcolor);
890
                i += channels;
891
                if (i >= SAMPLE_ARRAY_SIZE)
892
                    i -= SAMPLE_ARRAY_SIZE;
893
            }
894
        }
895

    
896
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
897

    
898
        for(ch = 1;ch < nb_display_channels; ch++) {
899
            y = s->ytop + ch * h;
900
            fill_rectangle(screen,
901
                           s->xleft, y, s->width, 1,
902
                           fgcolor);
903
        }
904
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
905
    }else{
906
        nb_display_channels= FFMIN(nb_display_channels, 2);
907
        if(rdft_bits != s->rdft_bits){
908
            ff_rdft_end(&s->rdft);
909
            ff_rdft_init(&s->rdft, rdft_bits, RDFT);
910
            s->rdft_bits= rdft_bits;
911
        }
912
        {
913
            FFTSample data[2][2*nb_freq];
914
            for(ch = 0;ch < nb_display_channels; ch++) {
915
                i = i_start + ch;
916
                for(x = 0; x < 2*nb_freq; x++) {
917
                    double w= (x-nb_freq)*(1.0/nb_freq);
918
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
919
                    i += channels;
920
                    if (i >= SAMPLE_ARRAY_SIZE)
921
                        i -= SAMPLE_ARRAY_SIZE;
922
                }
923
                ff_rdft_calc(&s->rdft, data[ch]);
924
            }
925
            // Least efficient way to do this; we should of course access the data directly, but it is more than fast enough
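            // Each refresh draws one vertical column of a scrolling spectrogram:
            // the magnitude of every RDFT bin is compressed with a double sqrt(),
            // clipped to 0..255 and mapped to the column's pixel colors.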
926
            for(y=0; y<s->height; y++){
927
                double w= 1/sqrt(nb_freq);
928
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
929
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
930
                a= FFMIN(a,255);
931
                b= FFMIN(b,255);
932
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
933

    
934
                fill_rectangle(screen,
935
                            s->xpos, s->height-y, 1, 1,
936
                            fgcolor);
937
            }
938
        }
939
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
940
        s->xpos++;
941
        if(s->xpos >= s->width)
942
            s->xpos= s->xleft;
943
    }
944
}
945

    
946
static int video_open(VideoState *is){
947
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
948
    int w,h;
949

    
950
    if(is_full_screen) flags |= SDL_FULLSCREEN;
951
    else               flags |= SDL_RESIZABLE;
952

    
953
    if (is_full_screen && fs_screen_width) {
954
        w = fs_screen_width;
955
        h = fs_screen_height;
956
    } else if(!is_full_screen && screen_width){
957
        w = screen_width;
958
        h = screen_height;
959
#if CONFIG_AVFILTER
960
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
961
        w = is->out_video_filter->inputs[0]->w;
962
        h = is->out_video_filter->inputs[0]->h;
963
#else
964
    }else if (is->video_st && is->video_st->codec->width){
965
        w = is->video_st->codec->width;
966
        h = is->video_st->codec->height;
967
#endif
968
    } else {
969
        w = 640;
970
        h = 480;
971
    }
972
#ifndef __APPLE__
973
    screen = SDL_SetVideoMode(w, h, 0, flags);
974
#else
975
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
976
    screen = SDL_SetVideoMode(w, h, 24, flags);
977
#endif
978
    if (!screen) {
979
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
980
        return -1;
981
    }
982
    SDL_WM_SetCaption("FFplay", "FFplay");
983

    
984
    is->width = screen->w;
985
    is->height = screen->h;
986

    
987
    return 0;
988
}
989

    
990
/* display the current picture, if any */
991
static void video_display(VideoState *is)
992
{
993
    if(!screen)
994
        video_open(cur_stream);
995
    if (is->audio_st && is->show_audio)
996
        video_audio_display(is);
997
    else if (is->video_st)
998
        video_image_display(is);
999
}
1000

    
1001
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
1002
{
1003
    SDL_Event event;
1004
    event.type = FF_REFRESH_EVENT;
1005
    event.user.data1 = opaque;
1006
    SDL_PushEvent(&event);
1007
    return 0; /* 0 means stop timer */
1008
}
1009

    
1010
/* schedule a video refresh in 'delay' ms */
1011
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
1012
{
1013
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
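    // The caller stores the returned timer id (e.g. in VideoPicture.timer_id) so a
    // pending refresh can be cancelled with SDL_RemoveTimer when the queue is flushed.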
1014
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
1015
}
1016

    
1017
/* get the current audio clock value */
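/* audio_clock holds the pts at the end of the last decoded audio data; subtract the
   playout time of the bytes still queued for output (assuming 16-bit samples, i.e.
   2 bytes per sample per channel) to estimate the pts actually being heard. */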
1018
static double get_audio_clock(VideoState *is)
1019
{
1020
    double pts;
1021
    int hw_buf_size, bytes_per_sec;
1022
    pts = is->audio_clock;
1023
    hw_buf_size = audio_write_get_buf_size(is);
1024
    bytes_per_sec = 0;
1025
    if (is->audio_st) {
1026
        bytes_per_sec = is->audio_st->codec->sample_rate *
1027
            2 * is->audio_st->codec->channels;
1028
    }
1029
    if (bytes_per_sec)
1030
        pts -= (double)hw_buf_size / bytes_per_sec;
1031
    return pts;
1032
}
1033

    
1034
/* get the current video clock value */
1035
static double get_video_clock(VideoState *is)
1036
{
1037
    if (is->paused) {
1038
        return is->video_current_pts;
1039
    } else {
1040
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1041
    }
1042
}
1043

    
1044
/* get the current external clock value */
1045
static double get_external_clock(VideoState *is)
1046
{
1047
    int64_t ti;
1048
    ti = av_gettime();
1049
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1050
}
1051

    
1052
/* get the current master clock value */
1053
static double get_master_clock(VideoState *is)
1054
{
1055
    double val;
1056

    
1057
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1058
        if (is->video_st)
1059
            val = get_video_clock(is);
1060
        else
1061
            val = get_audio_clock(is);
1062
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1063
        if (is->audio_st)
1064
            val = get_audio_clock(is);
1065
        else
1066
            val = get_video_clock(is);
1067
    } else {
1068
        val = get_external_clock(is);
1069
    }
1070
    return val;
1071
}
1072

    
1073
/* seek in the stream */
1074
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1075
{
1076
    if (!is->seek_req) {
1077
        is->seek_pos = pos;
1078
        is->seek_rel = rel;
1079
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1080
        if (seek_by_bytes)
1081
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1082
        is->seek_req = 1;
1083
    }
1084
}
1085

    
1086
/* pause or resume the video */
1087
static void stream_pause(VideoState *is)
1088
{
1089
    if (is->paused) {
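        /* un-pausing: advance frame_timer by the wall-clock time that elapsed while
           paused, so video frame scheduling resumes where it left off */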
1090
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1091
        if(is->read_pause_return != AVERROR(ENOSYS)){
1092
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1093
        }
1094
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1095
    }
1096
    is->paused = !is->paused;
1097
}
1098

    
1099
static double compute_frame_delay(double frame_current_pts, VideoState *is)
1100
{
1101
    double actual_delay, delay, sync_threshold, diff;
1102

    
1103
    /* compute nominal delay */
1104
    delay = frame_current_pts - is->frame_last_pts;
1105
    if (delay <= 0 || delay >= 10.0) {
1106
        /* if incorrect delay, use previous one */
1107
        delay = is->frame_last_delay;
1108
    } else {
1109
        is->frame_last_delay = delay;
1110
    }
1111
    is->frame_last_pts = frame_current_pts;
1112

    
1113
    /* update delay to follow master synchronisation source */
1114
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1115
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1116
        /* if video is slave, we try to correct big delays by
1117
           duplicating or deleting a frame */
1118
        diff = get_video_clock(is) - get_master_clock(is);
1119

    
1120
        /* skip or repeat frame. We take into account the
1121
           delay to compute the threshold. I still don't know
1122
           if it is the best guess */
1123
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1124
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1125
            if (diff <= -sync_threshold)
1126
                delay = 0;
1127
            else if (diff >= sync_threshold)
1128
                delay = 2 * delay;
1129
        }
1130
    }
1131

    
1132
    is->frame_timer += delay;
1133
    /* compute the REAL delay (we need to do that to avoid
1134
       long-term errors) */
1135
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1136
    if (actual_delay < 0.010) {
1137
        /* XXX: should skip picture */
1138
        actual_delay = 0.010;
1139
    }
1140

    
1141
#if defined(DEBUG_SYNC)
1142
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1143
            delay, actual_delay, frame_current_pts, -diff);
1144
#endif
1145

    
1146
    return actual_delay;
1147
}
1148

    
1149
/* called to display each frame */
1150
static void video_refresh_timer(void *opaque)
1151
{
1152
    VideoState *is = opaque;
1153
    VideoPicture *vp;
1154

    
1155
    SubPicture *sp, *sp2;
1156

    
1157
    if (is->video_st) {
1158
        if (is->pictq_size == 0) {
1159
            fprintf(stderr, "Internal error detected in the SDL timer\n");
1160
        } else {
1161
            /* dequeue the picture */
1162
            vp = &is->pictq[is->pictq_rindex];
1163

    
1164
            /* update current video pts */
1165
            is->video_current_pts = vp->pts;
1166
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1167
            is->video_current_pos = vp->pos;
1168

    
1169
            if(is->subtitle_st) {
1170
                if (is->subtitle_stream_changed) {
1171
                    SDL_LockMutex(is->subpq_mutex);
1172

    
1173
                    while (is->subpq_size) {
1174
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1175

    
1176
                        /* update queue size and signal for next picture */
1177
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1178
                            is->subpq_rindex = 0;
1179

    
1180
                        is->subpq_size--;
1181
                    }
1182
                    is->subtitle_stream_changed = 0;
1183

    
1184
                    SDL_CondSignal(is->subpq_cond);
1185
                    SDL_UnlockMutex(is->subpq_mutex);
1186
                } else {
1187
                    if (is->subpq_size > 0) {
1188
                        sp = &is->subpq[is->subpq_rindex];
1189

    
1190
                        if (is->subpq_size > 1)
1191
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1192
                        else
1193
                            sp2 = NULL;
1194

    
1195
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1196
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1197
                        {
1198
                            free_subpicture(sp);
1199

    
1200
                            /* update queue size and signal for next picture */
1201
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1202
                                is->subpq_rindex = 0;
1203

    
1204
                            SDL_LockMutex(is->subpq_mutex);
1205
                            is->subpq_size--;
1206
                            SDL_CondSignal(is->subpq_cond);
1207
                            SDL_UnlockMutex(is->subpq_mutex);
1208
                        }
1209
                    }
1210
                }
1211
            }
1212

    
1213
            /* display picture */
1214
            video_display(is);
1215

    
1216
            /* update queue size and signal for next picture */
1217
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1218
                is->pictq_rindex = 0;
1219

    
1220
            SDL_LockMutex(is->pictq_mutex);
1221
            vp->timer_id= 0;
1222
            is->pictq_size--;
1223
            SDL_CondSignal(is->pictq_cond);
1224
            SDL_UnlockMutex(is->pictq_mutex);
1225
        }
1226
    } else if (is->audio_st) {
1227
        /* draw the next audio frame */
1228

    
1229
        schedule_refresh(is, 40);
1230

    
1231
        /* if only audio stream, then display the audio bars (better
1232
           than nothing, just to test the implementation) */
1233

    
1234
        /* display picture */
1235
        video_display(is);
1236
    } else {
1237
        schedule_refresh(is, 100);
1238
    }
1239
    if (show_status) {
1240
        static int64_t last_time;
1241
        int64_t cur_time;
1242
        int aqsize, vqsize, sqsize;
1243
        double av_diff;
1244

    
1245
        cur_time = av_gettime();
1246
        if (!last_time || (cur_time - last_time) >= 30000) {
1247
            aqsize = 0;
1248
            vqsize = 0;
1249
            sqsize = 0;
1250
            if (is->audio_st)
1251
                aqsize = is->audioq.size;
1252
            if (is->video_st)
1253
                vqsize = is->videoq.size;
1254
            if (is->subtitle_st)
1255
                sqsize = is->subtitleq.size;
1256
            av_diff = 0;
1257
            if (is->audio_st && is->video_st)
1258
                av_diff = get_audio_clock(is) - get_video_clock(is);
1259
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
1260
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1261
            fflush(stdout);
1262
            last_time = cur_time;
1263
        }
1264
    }
1265
}
1266

    
1267
/* allocate a picture (this needs to be done in the main thread to avoid
1268
   potential locking problems) */
1269
static void alloc_picture(void *opaque)
1270
{
1271
    VideoState *is = opaque;
1272
    VideoPicture *vp;
1273

    
1274
    vp = &is->pictq[is->pictq_windex];
1275

    
1276
    if (vp->bmp)
1277
        SDL_FreeYUVOverlay(vp->bmp);
1278

    
1279
#if CONFIG_AVFILTER
1280
    if (vp->picref)
1281
        avfilter_unref_pic(vp->picref);
1282
    vp->picref = NULL;
1283

    
1284
    vp->width   = is->out_video_filter->inputs[0]->w;
1285
    vp->height  = is->out_video_filter->inputs[0]->h;
1286
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1287
#else
1288
    vp->width   = is->video_st->codec->width;
1289
    vp->height  = is->video_st->codec->height;
1290
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1291
#endif
1292

    
1293
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1294
                                   SDL_YV12_OVERLAY,
1295
                                   screen);
1296

    
1297
    SDL_LockMutex(is->pictq_mutex);
1298
    vp->allocated = 1;
1299
    SDL_CondSignal(is->pictq_cond);
1300
    SDL_UnlockMutex(is->pictq_mutex);
1301
}
1302

    
1303
/**
1304
 *
1305
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1306
 */
1307
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1308
{
1309
    VideoPicture *vp;
1310
    int dst_pix_fmt;
1311
#if CONFIG_AVFILTER
1312
    AVPicture pict_src;
1313
#endif
1314
    /* wait until we have space to put a new picture */
1315
    SDL_LockMutex(is->pictq_mutex);
1316
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1317
           !is->videoq.abort_request) {
1318
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1319
    }
1320
    SDL_UnlockMutex(is->pictq_mutex);
1321

    
1322
    if (is->videoq.abort_request)
1323
        return -1;
1324

    
1325
    vp = &is->pictq[is->pictq_windex];
1326

    
1327
    /* alloc or resize hardware picture buffer */
1328
    if (!vp->bmp ||
1329
#if CONFIG_AVFILTER
1330
        vp->width  != is->out_video_filter->inputs[0]->w ||
1331
        vp->height != is->out_video_filter->inputs[0]->h) {
1332
#else
1333
        vp->width != is->video_st->codec->width ||
1334
        vp->height != is->video_st->codec->height) {
1335
#endif
1336
        SDL_Event event;
1337

    
1338
        vp->allocated = 0;
1339

    
1340
        /* the allocation must be done in the main thread to avoid
1341
           locking problems */
1342
        event.type = FF_ALLOC_EVENT;
1343
        event.user.data1 = is;
1344
        SDL_PushEvent(&event);
1345

    
1346
        /* wait until the picture is allocated */
1347
        SDL_LockMutex(is->pictq_mutex);
1348
        while (!vp->allocated && !is->videoq.abort_request) {
1349
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1350
        }
1351
        SDL_UnlockMutex(is->pictq_mutex);
1352

    
1353
        if (is->videoq.abort_request)
1354
            return -1;
1355
    }
1356

    
1357
    /* if the frame is not skipped, then display it */
1358
    if (vp->bmp) {
1359
        AVPicture pict;
1360
#if CONFIG_AVFILTER
1361
        if(vp->picref)
1362
            avfilter_unref_pic(vp->picref);
1363
        vp->picref = src_frame->opaque;
1364
#endif
1365

    
1366
        /* get a pointer to the bitmap */
1367
        SDL_LockYUVOverlay (vp->bmp);
1368

    
1369
        dst_pix_fmt = PIX_FMT_YUV420P;
1370
        memset(&pict,0,sizeof(AVPicture));
1371
        pict.data[0] = vp->bmp->pixels[0];
1372
        pict.data[1] = vp->bmp->pixels[2];
1373
        pict.data[2] = vp->bmp->pixels[1];
1374

    
1375
        pict.linesize[0] = vp->bmp->pitches[0];
1376
        pict.linesize[1] = vp->bmp->pitches[2];
1377
        pict.linesize[2] = vp->bmp->pitches[1];
1378

    
1379
#if CONFIG_AVFILTER
1380
        pict_src.data[0] = src_frame->data[0];
1381
        pict_src.data[1] = src_frame->data[1];
1382
        pict_src.data[2] = src_frame->data[2];
1383

    
1384
        pict_src.linesize[0] = src_frame->linesize[0];
1385
        pict_src.linesize[1] = src_frame->linesize[1];
1386
        pict_src.linesize[2] = src_frame->linesize[2];
1387

    
1388
        //FIXME use direct rendering
1389
        av_picture_copy(&pict, &pict_src,
1390
                        vp->pix_fmt, vp->width, vp->height);
1391
#else
1392
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1393
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1394
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1395
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1396
        if (is->img_convert_ctx == NULL) {
1397
            fprintf(stderr, "Cannot initialize the conversion context\n");
1398
            exit(1);
1399
        }
1400
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1401
                  0, vp->height, pict.data, pict.linesize);
1402
#endif
1403
        /* update the bitmap content */
1404
        SDL_UnlockYUVOverlay(vp->bmp);
1405

    
1406
        vp->pts = pts;
1407
        vp->pos = pos;
1408

    
1409
        /* now we can update the picture count */
1410
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1411
            is->pictq_windex = 0;
1412
        SDL_LockMutex(is->pictq_mutex);
1413
        is->pictq_size++;
1414
        // We must schedule while holding the mutex so the timer id is stored before the timer can fire, otherwise we might end up freeing an already freed id
1415
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1416
        SDL_UnlockMutex(is->pictq_mutex);
1417
    }
1418
    return 0;
1419
}
1420

    
1421
/**
1422
 * compute the exact PTS for the picture if it is omitted in the stream
1423
 * @param pts1 the dts of the pkt / pts of the frame
1424
 */
1425
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1426
{
1427
    double frame_delay, pts;
1428

    
1429
    pts = pts1;
1430

    
1431
    if (pts != 0) {
1432
        /* update video clock with pts, if present */
1433
        is->video_clock = pts;
1434
    } else {
1435
        pts = is->video_clock;
1436
    }
1437
    /* update video clock for next frame */
1438
    frame_delay = av_q2d(is->video_st->codec->time_base);
1439
    /* for MPEG2, the frame can be repeated, so we update the
1440
       clock accordingly */
1441
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1442
    is->video_clock += frame_delay;
1443

    
1444
#if defined(DEBUG_SYNC) && 0
1445
    {
1446
        int ftype;
1447
        if (src_frame->pict_type == FF_B_TYPE)
1448
            ftype = 'B';
1449
        else if (src_frame->pict_type == FF_I_TYPE)
1450
            ftype = 'I';
1451
        else
1452
            ftype = 'P';
1453
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1454
               ftype, pts, pts1);
1455
    }
1456
#endif
1457
    return queue_picture(is, src_frame, pts, pos);
1458
}
1459

    
1460
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1461
{
1462
    int len1, got_picture, i;
1463

    
1464
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1465
            return -1;
1466

    
1467
        if(pkt->data == flush_pkt.data){
1468
            avcodec_flush_buffers(is->video_st->codec);
1469

    
1470
            SDL_LockMutex(is->pictq_mutex);
1471
            // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1472
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1473
                if(is->pictq[i].timer_id){
1474
                    SDL_RemoveTimer(is->pictq[i].timer_id);
1475
                    is->pictq[i].timer_id=0;
1476
                    schedule_refresh(is, 1);
1477
                }
1478
            }
1479
            while (is->pictq_size && !is->videoq.abort_request) {
1480
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1481
            }
1482
            is->video_current_pos= -1;
1483
            SDL_UnlockMutex(is->pictq_mutex);
1484

    
1485
            is->last_dts_for_fault_detection=
1486
            is->last_pts_for_fault_detection= INT64_MIN;
1487
            is->frame_last_pts= AV_NOPTS_VALUE;
1488
            is->frame_last_delay = 0;
1489
            is->frame_timer = (double)av_gettime() / 1000000.0;
1490

    
1491
            return 0;
1492
        }
1493

    
1494
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1495
           this packet, if any */
1496
        is->video_st->codec->reordered_opaque= pkt->pts;
1497
        len1 = avcodec_decode_video2(is->video_st->codec,
1498
                                    frame, &got_picture,
1499
                                    pkt);
1500

    
1501
        if (got_picture) {
1502
            if(pkt->dts != AV_NOPTS_VALUE){
1503
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1504
                is->last_dts_for_fault_detection= pkt->dts;
1505
            }
1506
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
1507
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1508
                is->last_pts_for_fault_detection= frame->reordered_opaque;
1509
            }
1510
        }
1511

    
1512
        if(   (   decoder_reorder_pts==1
1513
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1514
               || pkt->dts == AV_NOPTS_VALUE)
1515
           && frame->reordered_opaque != AV_NOPTS_VALUE)
1516
            *pts= frame->reordered_opaque;
1517
        else if(pkt->dts != AV_NOPTS_VALUE)
1518
            *pts= pkt->dts;
1519
        else
1520
            *pts= 0;
1521

    
1522
//            if (len1 < 0)
1523
//                break;
1524
    if (got_picture)
1525
        return 1;
1526
    return 0;
1527
}
1528

    
1529
#if CONFIG_AVFILTER
typedef struct {
    VideoState *is;
    AVFrame *frame;
} FilterPriv;

static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    if(!opaque) return -1;

    priv->is = opaque;
    priv->frame = avcodec_alloc_frame();

    return 0;
}

static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}

static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}

static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;

    return 0;
}

static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};

static void output_end_frame(AVFilterLink *link)
{
}

static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}

static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
#endif  /* CONFIG_AVFILTER */

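/* Video decoding thread.  With CONFIG_AVFILTER it first builds the filter
 * graph: the "ffplay_input" source and "ffplay_output" sink are opened and
 * either linked directly or connected through the chain given with -vfilters,
 * and the graph is validated and configured before frames are pulled from the
 * sink.  Without avfilter, frames come straight from get_video_frame(). */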
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}

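/* Subtitle decoding thread: waits for room in the subpicture queue, decodes
 * each subtitle packet and, for bitmap subtitles, converts the RGBA palette
 * entries to YUVA in place so they can later be blended onto the YUV overlay. */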
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

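/* Sync note: when audio is not the master clock, the measured A-V difference
 * is low-pass filtered (audio_diff_cum with coefficient audio_diff_avg_coef)
 * and, once the average exceeds audio_diff_threshold, the buffer is shrunk or
 * stretched, clamped to the window set by SAMPLE_CORRECTION_PERCENT_MAX.
 * Rough example (illustrative numbers only): at 44100 Hz stereo S16,
 * n = 4 bytes per sample pair, so a diff of +0.01 s asks for
 * wanted_size = samples_size + 0.01 * 44100 * 4 = samples_size + 1764 bytes
 * before clamping. */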
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the final sample */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}

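/* Note on the decode loop below: one queued packet may contain several audio
 * frames, so pkt_temp keeps the not-yet-decoded remainder of the current
 * packet between iterations.  Each decoded chunk optionally goes through
 * av_audio_convert() to reach signed 16-bit samples, and audio_clock is
 * advanced by decoded_bytes / (2 * channels * sample_rate) seconds. */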
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts, if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


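/* SDL pulls audio through this callback.  Whenever the internal buffer is
 * exhausted it decodes the next frame, optionally feeds the waveform display
 * and synchronize_audio(), and on decode errors it outputs silence so the
 * playback timing is preserved. */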
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

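/* Per-stream setup: the decoder context is configured from the command-line
 * options (lowres, skip_*, error handling, thread count), the codec is
 * opened, and depending on the stream type either the SDL audio device is
 * opened or the video/subtitle decoding thread is started. */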
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have precise enough audio fifo fullness information,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

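/* Demuxing thread: opens the input, picks the audio/video/subtitle streams to
 * use, then loops reading packets and dispatching them to the per-stream
 * queues.  Reading is throttled while the queues are full, seek requests are
 * serviced with avformat_seek_file() followed by a queue flush, and at end of
 * file an empty video packet is queued so the decoder can flush its delayed
 * frames. */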
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];
    int st_count[CODEC_TYPE_NB]={0};
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    if(ret<0) {
        /* add the refresh timer to draw the picture */
        schedule_refresh(is, 40);

        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}

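/* Cycle to the next usable stream of the given type ('a'/'v'/'t' keys).
 * For subtitles the search may wrap to "no stream" (index -1), which simply
 * closes the current subtitle stream. */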
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
        fill_rectangle(screen,
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
                    bgcolor);
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
    }
}

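/* Main SDL event loop.  Arrow keys seek by +/-10 s or +/-60 s; when seeking
 * by bytes the time increment is converted to a byte offset using the stream
 * bit rate (or a rough 180000 bytes/s fallback).  A mouse click seeks to the
 * fraction of the file corresponding to the click position. */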
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

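/* Program entry point: registers all codecs and formats (and filters when
 * avfilter is enabled), parses the command line, initializes SDL with audio,
 * video and timer support, and hands control to event_loop(), which never
 * returns. */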
/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}