ffmpeg / ffplay.c @ 917d2bb3

/*
 * FFplay : Simple Media Player based on the FFmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"
#include "libavcodec/dsputil.h"

#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
# include "libavfilter/graphparser.h"
#endif

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

#if !CONFIG_AVFILTER
static int sws_flags = SWS_BICUBIC;
#endif

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
    SDL_TimerID timer_id;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;
#endif
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext rdft;
    int rdft_bits;
    int xpos;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
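
/* Illustrative usage sketch (added note, not part of the player logic): the
 * queue above is a classic producer/consumer pair.  The demux thread calls
 * packet_queue_put() while a decoder thread blocks in packet_queue_get();
 * packet_queue_abort() wakes any waiter so the threads can exit:
 *
 *     PacketQueue q;
 *     packet_queue_init(&q);
 *     packet_queue_put(&q, &pkt);                 // producer (demux thread)
 *
 *     AVPacket pkt;
 *     if (packet_queue_get(&q, &pkt, 1) > 0) {    // consumer (decoder thread)
 *         ... decode ...
 *         av_free_packet(&pkt);
 *     }
 *
 *     packet_queue_abort(&q);                     // teardown
 *     packet_queue_end(&q);
 */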

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
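
/* Added note: for s=0 this is the usual alpha blend
 * (oldp*(255-a) + newp*a) / 255.  The callers below pass s=1 or s=2 together
 * with chroma values that are the *sum* of 2 or 4 samples (u1/v1) and an
 * alpha that is the summed alpha >> 2, so the same macro averages the
 * accumulated samples while blending them over the destination. */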

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}
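
/* Added note: unlike the C '%' operator, compute_mod() always returns a
 * value in [0, b): compute_mod(-3, 10) == 7 while -3 % 10 == -3.  It is used
 * below to wrap indices into the circular sample_array[]. */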

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;
    int rdft_bits, nb_freq;

    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= data_used / 2;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            ff_rdft_end(&s->rdft);
            ff_rdft_init(&s->rdft, rdft_bits, RDFT);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                ff_rdft_calc(&s->rdft, data[ch]);
            }
            // Least efficient way to do this; we should of course access the data directly, but it is more than fast enough.
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
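
/* Added note: show_audio == 1 draws a per-channel oscilloscope of the most
 * recently played samples; any other non-zero value applies a windowed RDFT
 * to at most two channels and draws one spectrum column per refresh,
 * scrolling horizontally across the window. */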

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
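
/* Added note: is->audio_clock is the PTS at the *end* of the data most
 * recently handed to SDL, so the clock of the sample currently being heard is
 *     pts = audio_clock - hw_buf_size / bytes_per_sec
 * with bytes_per_sec = sample_rate * 2 * channels (16-bit samples assumed,
 * hence the hard-coded 2).  E.g. 44100 Hz stereo gives 176400 bytes/s, so an
 * unplayed buffer of 8192 bytes moves the clock back by roughly 46 ms. */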

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
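
/* Added note: each clock is stored as a drift term relative to the wall
 * clock, so reading it is just drift + av_gettime()/1e6.  On resume,
 * stream_pause() above advances frame_timer by the time spent paused (the
 * gap between the frozen video PTS and where its drift-based clock would
 * have run to), so playback resumes without a jump. */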

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
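
/* Added note: when video is slaved to audio or to the external clock, the
 * rule above is
 *     diff <= -threshold  ->  delay = 0          (video is late, catch up)
 *     diff >=  threshold  ->  delay = 2 * delay  (video is early, hold back)
 * with threshold = max(AV_SYNC_THRESHOLD, delay), and no correction once
 * |diff| exceeds AV_NOSYNC_THRESHOLD.  For a 25 fps stream (40 ms nominal
 * delay): 40 ms late -> show the next frame immediately; 40 ms early -> wait
 * 80 ms.  The result is converted to a wall-clock delay via frame_timer and
 * clamped to at least 10 ms. */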

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            fprintf(stderr, "Internal error detected in the SDL timer\n");
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
            is->video_current_pos = vp->pos;

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            vp->timer_id= 0;
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        // We must schedule while holding the mutex, as we must store the timer id before the timer expires, or we might end up freeing an already freed id.
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts, pos);
}
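
/* Added note: when a frame carries no usable PTS the current video clock is
 * reused, and the clock is then advanced by the predicted frame duration
 *     duration = time_base * (1 + repeat_pict / 2)
 * so e.g. repeat_pict == 1 advances the clock by 1.5 * time_base instead of
 * 1 * time_base. */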

static int get_video_frame(VideoState *is, AVFrame *frame, uint64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder).
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                if(is->pictq[i].timer_id){
                    SDL_RemoveTimer(is->pictq[i].timer_id);
                    is->pictq[i].timer_id=0;
                    schedule_refresh(is, 1);
                }
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;

            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

        /* put pts into units of 1/AV_TIME_BASE */
        *pts = av_rescale_q(*pts, is->video_st->time_base, AV_TIME_BASE_Q);

//            if (len1 < 0)
//                break;
    if (got_picture)
        return 1;
    return 0;
}
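
/* Added note: the timestamp selection above is controlled by
 * decoder_reorder_pts: 1 always prefers the decoder-reordered PTS carried in
 * reordered_opaque, 0 prefers the packet DTS, and the default of -1 lets the
 * faulty_pts / faulty_dts counters (incremented whenever a timestamp fails
 * to increase) decide which of the two looks healthier for this stream. */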
1531

    
1532
#if CONFIG_AVFILTER
1533
typedef struct {
1534
    VideoState *is;
1535
    AVFrame *frame;
1536
} FilterPriv;
1537

    
1538
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1539
{
1540
    FilterPriv *priv = ctx->priv;
1541
    if(!opaque) return -1;
1542

    
1543
    priv->is = opaque;
1544
    priv->frame = avcodec_alloc_frame();
1545

    
1546
    return 0;
1547
}
1548

    
1549
static void input_uninit(AVFilterContext *ctx)
1550
{
1551
    FilterPriv *priv = ctx->priv;
1552
    av_free(priv->frame);
1553
}
1554

    
1555
static int input_request_frame(AVFilterLink *link)
1556
{
1557
    FilterPriv *priv = link->src->priv;
1558
    AVFilterPicRef *picref;
1559
    uint64_t pts = 0;
1560
    AVPacket pkt;
1561
    int ret;
1562

    
1563
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1564
        av_free_packet(&pkt);
1565
    if (ret < 0)
1566
        return -1;
1567

    
1568
    /* FIXME: until I figure out how to hook everything up to the codec
1569
     * right, we're just copying the entire frame. */
1570
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1571
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1572
                    picref->pic->format, link->w, link->h);
1573
    av_free_packet(&pkt);
1574

    
1575
    picref->pts = pts;
1576
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1577
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
1578
    avfilter_draw_slice(link, 0, link->h, 1);
1579
    avfilter_end_frame(link);
1580
    avfilter_unref_pic(picref);
1581

    
1582
    return 0;
1583
}
1584

    
1585
static int input_query_formats(AVFilterContext *ctx)
1586
{
1587
    FilterPriv *priv = ctx->priv;
1588
    enum PixelFormat pix_fmts[] = {
1589
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1590
    };
1591

    
1592
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1593
    return 0;
1594
}
1595

    
1596
static int input_config_props(AVFilterLink *link)
1597
{
1598
    FilterPriv *priv  = link->src->priv;
1599
    AVCodecContext *c = priv->is->video_st->codec;
1600

    
1601
    link->w = c->width;
1602
    link->h = c->height;
1603

    
1604
    return 0;
1605
}
1606

    
1607
static AVFilter input_filter =
1608
{
1609
    .name      = "ffplay_input",
1610

    
1611
    .priv_size = sizeof(FilterPriv),
1612

    
1613
    .init      = input_init,
1614
    .uninit    = input_uninit,
1615

    
1616
    .query_formats = input_query_formats,
1617

    
1618
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1619
    .outputs   = (AVFilterPad[]) {{ .name = "default",
1620
                                    .type = CODEC_TYPE_VIDEO,
1621
                                    .request_frame = input_request_frame,
1622
                                    .config_props  = input_config_props, },
1623
                                  { .name = NULL }},
1624
};
1625

    
1626
static void output_end_frame(AVFilterLink *link)
1627
{
1628
}
1629

    
1630
static int output_query_formats(AVFilterContext *ctx)
1631
{
1632
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1633

    
1634
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1635
    return 0;
1636
}
1637

    
1638
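/* pull one picture out of the filter graph: request a frame on the output
 * filter's input link, take over its cur_pic reference and expose the data
 * and linesize pointers through the caller's AVFrame; the reference is kept
 * in frame->opaque so it can be released after display */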
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1639
                                    uint64_t *pts)
1640
{
1641
    AVFilterPicRef *pic;
1642

    
1643
    if(avfilter_request_frame(ctx->inputs[0]))
1644
        return -1;
1645
    if(!(pic = ctx->inputs[0]->cur_pic))
1646
        return -1;
1647
    ctx->inputs[0]->cur_pic = NULL;
1648

    
1649
    frame->opaque = pic;
1650
    *pts          = pic->pts;
1651

    
1652
    memcpy(frame->data,     pic->data,     sizeof(frame->data));
1653
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1654

    
1655
    return 1;
1656
}
1657

    
1658
static AVFilter output_filter =
1659
{
1660
    .name      = "ffplay_output",
1661

    
1662
    .query_formats = output_query_formats,
1663

    
1664
    .inputs    = (AVFilterPad[]) {{ .name          = "default",
1665
                                    .type          = CODEC_TYPE_VIDEO,
1666
                                    .end_frame     = output_end_frame,
1667
                                    .min_perms     = AV_PERM_READ, },
1668
                                  { .name = NULL }},
1669
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
1670
};
1671
#endif  /* CONFIG_AVFILTER */
1672

    
1673
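/* video decoding thread: with CONFIG_AVFILTER it first builds the filter
 * graph (either the user-supplied -vfilters chain or a direct src->out link),
 * then loops fetching frames and handing them to output_picture2() for
 * display */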
static int video_thread(void *arg)
1674
{
1675
    VideoState *is = arg;
1676
    AVFrame *frame= avcodec_alloc_frame();
1677
    uint64_t pts_int;
1678
    double pts;
1679
    int ret;
1680

    
1681
#if CONFIG_AVFILTER
1682
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1683
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1684
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
1685

    
1686
    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
1687
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;
1688

    
1689
    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1690
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
1691

    
1692

    
1693
    if(vfilters) {
1694
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1695
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1696

    
1697
        outputs->name    = av_strdup("in");
1698
        outputs->filter  = filt_src;
1699
        outputs->pad_idx = 0;
1700
        outputs->next    = NULL;
1701

    
1702
        inputs->name    = av_strdup("out");
1703
        inputs->filter  = filt_out;
1704
        inputs->pad_idx = 0;
1705
        inputs->next    = NULL;
1706

    
1707
        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1708
            goto the_end;
1709
        av_freep(&vfilters);
1710
    } else {
1711
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1712
    }
1713
    avfilter_graph_add_filter(graph, filt_src);
1714
    avfilter_graph_add_filter(graph, filt_out);
1715

    
1716
    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1717
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1718
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1719

    
1720
    is->out_video_filter = filt_out;
1721
#endif
1722

    
1723
    for(;;) {
1724
#if !CONFIG_AVFILTER
1725
        AVPacket pkt;
1726
#endif
1727
        while (is->paused && !is->videoq.abort_request)
1728
            SDL_Delay(10);
1729
#if CONFIG_AVFILTER
1730
        ret = get_filtered_video_frame(filt_out, frame, &pts_int);
1731
#else
1732
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1733
#endif
1734

    
1735
        if (ret < 0) goto the_end;
1736

    
1737
        if (!ret)
1738
            continue;
1739

    
1740
        pts  = pts_int;
1741
        pts /= AV_TIME_BASE;
1742

    
1743
#if CONFIG_AVFILTER
1744
        ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
1745
#else
1746
        ret = output_picture2(is, frame, pts,  pkt.pos);
1747
        av_free_packet(&pkt);
1748
#endif
1749
        if (ret < 0)
1750
            goto the_end;
1751

    
1752
        if (step)
1753
            if (cur_stream)
1754
                stream_pause(cur_stream);
1755
    }
1756
 the_end:
1757
#if CONFIG_AVFILTER
1758
    avfilter_graph_destroy(graph);
1759
    av_freep(&graph);
1760
#endif
1761
    av_free(frame);
1762
    return 0;
1763
}
1764

    
1765
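/* subtitle decoding thread: waits for room in the subpicture queue, decodes
 * each packet and converts the subtitle palette from RGBA to CCIR YUVA before
 * queuing the result for display */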
static int subtitle_thread(void *arg)
1766
{
1767
    VideoState *is = arg;
1768
    SubPicture *sp;
1769
    AVPacket pkt1, *pkt = &pkt1;
1770
    int len1, got_subtitle;
1771
    double pts;
1772
    int i, j;
1773
    int r, g, b, y, u, v, a;
1774

    
1775
    for(;;) {
1776
        while (is->paused && !is->subtitleq.abort_request) {
1777
            SDL_Delay(10);
1778
        }
1779
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1780
            break;
1781

    
1782
        if(pkt->data == flush_pkt.data){
1783
            avcodec_flush_buffers(is->subtitle_st->codec);
1784
            continue;
1785
        }
1786
        SDL_LockMutex(is->subpq_mutex);
1787
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1788
               !is->subtitleq.abort_request) {
1789
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1790
        }
1791
        SDL_UnlockMutex(is->subpq_mutex);
1792

    
1793
        if (is->subtitleq.abort_request)
1794
            goto the_end;
1795

    
1796
        sp = &is->subpq[is->subpq_windex];
1797

    
1798
        /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1799
           this packet, if any */
1800
        pts = 0;
1801
        if (pkt->pts != AV_NOPTS_VALUE)
1802
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1803

    
1804
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1805
                                    &sp->sub, &got_subtitle,
1806
                                    pkt);
1807
//            if (len1 < 0)
1808
//                break;
1809
        if (got_subtitle && sp->sub.format == 0) {
1810
            sp->pts = pts;
1811

    
1812
            for (i = 0; i < sp->sub.num_rects; i++)
1813
            {
1814
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1815
                {
1816
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1817
                    y = RGB_TO_Y_CCIR(r, g, b);
1818
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1819
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1820
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1821
                }
1822
            }
1823

    
1824
            /* now we can update the picture count */
1825
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1826
                is->subpq_windex = 0;
1827
            SDL_LockMutex(is->subpq_mutex);
1828
            is->subpq_size++;
1829
            SDL_UnlockMutex(is->subpq_mutex);
1830
        }
1831
        av_free_packet(pkt);
1832
//        if (step)
1833
//            if (cur_stream)
1834
//                stream_pause(cur_stream);
1835
    }
1836
 the_end:
1837
    return 0;
1838
}
1839

    
1840
/* copy samples into the ring buffer used by the audio waveform display */
1841
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1842
{
1843
    int size, len, channels;
1844

    
1845
    channels = is->audio_st->codec->channels;
1846

    
1847
    size = samples_size / sizeof(short);
1848
    while (size > 0) {
1849
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1850
        if (len > size)
1851
            len = size;
1852
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1853
        samples += len;
1854
        is->sample_array_index += len;
1855
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1856
            is->sample_array_index = 0;
1857
        size -= len;
1858
    }
1859
}
1860

    
1861
/* return the new audio buffer size (samples can be added or deleted
1862
   to get better sync when the video or external clock is the master) */
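/*
 * Rough illustration of the correction below (the numbers are only an
 * example): with 2-channel s16 audio, n = 4 bytes per sample pair.  If the
 * audio clock is 0.05 s ahead of the master clock at 44100 Hz, wanted_size
 * grows by about 0.05 * 44100 * 4 = 8820 bytes, clamped to within
 * SAMPLE_CORRECTION_PERCENT_MAX percent of the current buffer size.
 */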
1863
static int synchronize_audio(VideoState *is, short *samples,
1864
                             int samples_size1, double pts)
1865
{
1866
    int n, samples_size;
1867
    double ref_clock;
1868

    
1869
    n = 2 * is->audio_st->codec->channels;
1870
    samples_size = samples_size1;
1871

    
1872
    /* if not master, then we try to remove or add samples to correct the clock */
1873
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1874
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1875
        double diff, avg_diff;
1876
        int wanted_size, min_size, max_size, nb_samples;
1877

    
1878
        ref_clock = get_master_clock(is);
1879
        diff = get_audio_clock(is) - ref_clock;
1880

    
1881
        if (diff < AV_NOSYNC_THRESHOLD) {
1882
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1883
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1884
                /* not enough measurements yet for a reliable estimate */
1885
                is->audio_diff_avg_count++;
1886
            } else {
1887
                /* estimate the A-V difference */
1888
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1889

    
1890
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1891
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1892
                    nb_samples = samples_size / n;
1893

    
1894
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1895
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1896
                    if (wanted_size < min_size)
1897
                        wanted_size = min_size;
1898
                    else if (wanted_size > max_size)
1899
                        wanted_size = max_size;
1900

    
1901
                    /* add or remove samples to correct the sync */
1902
                    if (wanted_size < samples_size) {
1903
                        /* remove samples */
1904
                        samples_size = wanted_size;
1905
                    } else if (wanted_size > samples_size) {
1906
                        uint8_t *samples_end, *q;
1907
                        int nb;
1908

    
1909
                        /* add samples */
1910
                        nb = (wanted_size - samples_size);
1911
                        samples_end = (uint8_t *)samples + samples_size - n;
1912
                        q = samples_end + n;
1913
                        while (nb > 0) {
1914
                            memcpy(q, samples_end, n);
1915
                            q += n;
1916
                            nb -= n;
1917
                        }
1918
                        samples_size = wanted_size;
1919
                    }
1920
                }
1921
#if 0
1922
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1923
                       diff, avg_diff, samples_size - samples_size1,
1924
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1925
#endif
1926
            }
1927
        } else {
1928
            /* difference is too big: probably initial PTS errors, so
1929
               reset the A-V filter */
1930
            is->audio_diff_avg_count = 0;
1931
            is->audio_diff_cum = 0;
1932
        }
1933
    }
1934

    
1935
    return samples_size;
1936
}
1937

    
1938
/* decode one audio frame and return its uncompressed size in bytes */
1939
static int audio_decode_frame(VideoState *is, double *pts_ptr)
1940
{
1941
    AVPacket *pkt_temp = &is->audio_pkt_temp;
1942
    AVPacket *pkt = &is->audio_pkt;
1943
    AVCodecContext *dec= is->audio_st->codec;
1944
    int n, len1, data_size;
1945
    double pts;
1946

    
1947
    for(;;) {
1948
        /* NOTE: the audio packet can contain several frames */
1949
        while (pkt_temp->size > 0) {
1950
            data_size = sizeof(is->audio_buf1);
1951
            len1 = avcodec_decode_audio3(dec,
1952
                                        (int16_t *)is->audio_buf1, &data_size,
1953
                                        pkt_temp);
1954
            if (len1 < 0) {
1955
                /* if error, we skip the frame */
1956
                pkt_temp->size = 0;
1957
                break;
1958
            }
1959

    
1960
            pkt_temp->data += len1;
1961
            pkt_temp->size -= len1;
1962
            if (data_size <= 0)
1963
                continue;
1964

    
1965
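            /* the SDL audio device was opened for signed 16-bit samples; if
               the decoder outputs a different sample format, (re)create an
               audio convert context that converts it to SAMPLE_FMT_S16 */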
            if (dec->sample_fmt != is->audio_src_fmt) {
1966
                if (is->reformat_ctx)
1967
                    av_audio_convert_free(is->reformat_ctx);
1968
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1969
                                                         dec->sample_fmt, 1, NULL, 0);
1970
                if (!is->reformat_ctx) {
1971
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1972
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
1973
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1974
                    break;
1975
                }
1976
                is->audio_src_fmt= dec->sample_fmt;
1977
            }
1978

    
1979
            if (is->reformat_ctx) {
1980
                const void *ibuf[6]= {is->audio_buf1};
1981
                void *obuf[6]= {is->audio_buf2};
1982
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1983
                int ostride[6]= {2};
1984
                int len= data_size/istride[0];
1985
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1986
                    printf("av_audio_convert() failed\n");
1987
                    break;
1988
                }
1989
                is->audio_buf= is->audio_buf2;
1990
                /* FIXME: existing code assumes that data_size equals framesize*channels*2;
1991
                          remove this legacy cruft */
1992
                data_size= len*2;
1993
            }else{
1994
                is->audio_buf= is->audio_buf1;
1995
            }
1996

    
1997
            /* if no pts, then compute it */
1998
            pts = is->audio_clock;
1999
            *pts_ptr = pts;
2000
            n = 2 * dec->channels;
2001
            is->audio_clock += (double)data_size /
2002
                (double)(n * dec->sample_rate);
2003
#if defined(DEBUG_SYNC)
2004
            {
2005
                static double last_clock;
2006
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2007
                       is->audio_clock - last_clock,
2008
                       is->audio_clock, pts);
2009
                last_clock = is->audio_clock;
2010
            }
2011
#endif
2012
            return data_size;
2013
        }
2014

    
2015
        /* free the current packet */
2016
        if (pkt->data)
2017
            av_free_packet(pkt);
2018

    
2019
        if (is->paused || is->audioq.abort_request) {
2020
            return -1;
2021
        }
2022

    
2023
        /* read next packet */
2024
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2025
            return -1;
2026
        if(pkt->data == flush_pkt.data){
2027
            avcodec_flush_buffers(dec);
2028
            continue;
2029
        }
2030

    
2031
        pkt_temp->data = pkt->data;
2032
        pkt_temp->size = pkt->size;
2033

    
2034
        /* update the audio clock with the packet pts */
2035
        if (pkt->pts != AV_NOPTS_VALUE) {
2036
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2037
        }
2038
    }
2039
}
2040

    
2041
/* get the current audio output buffer size, in bytes. With SDL, we
2042
   cannot get precise information on the hardware buffer fullness */
2043
static int audio_write_get_buf_size(VideoState *is)
2044
{
2045
    return is->audio_buf_size - is->audio_buf_index;
2046
}
2047

    
2048

    
2049
/* prepare a new audio buffer */
2050
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2051
{
2052
    VideoState *is = opaque;
2053
    int audio_size, len1;
2054
    double pts;
2055

    
2056
    audio_callback_time = av_gettime();
2057

    
2058
    while (len > 0) {
2059
        if (is->audio_buf_index >= is->audio_buf_size) {
2060
            audio_size = audio_decode_frame(is, &pts);
2061
            if (audio_size < 0) {
2062
                /* if error, just output silence */
2063
                is->audio_buf = is->audio_buf1;
2064
                is->audio_buf_size = 1024;
2065
                memset(is->audio_buf, 0, is->audio_buf_size);
2066
            } else {
2067
                if (is->show_audio)
2068
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2069
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2070
                                               pts);
2071
                is->audio_buf_size = audio_size;
2072
            }
2073
            is->audio_buf_index = 0;
2074
        }
2075
        len1 = is->audio_buf_size - is->audio_buf_index;
2076
        if (len1 > len)
2077
            len1 = len;
2078
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2079
        len -= len1;
2080
        stream += len1;
2081
        is->audio_buf_index += len1;
2082
    }
2083
}
2084

    
2085
/* open a given stream. Return 0 if OK */
2086
static int stream_component_open(VideoState *is, int stream_index)
2087
{
2088
    AVFormatContext *ic = is->ic;
2089
    AVCodecContext *avctx;
2090
    AVCodec *codec;
2091
    SDL_AudioSpec wanted_spec, spec;
2092

    
2093
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2094
        return -1;
2095
    avctx = ic->streams[stream_index]->codec;
2096

    
2097
    /* prepare audio output */
2098
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2099
        if (avctx->channels > 0) {
2100
            avctx->request_channels = FFMIN(2, avctx->channels);
2101
        } else {
2102
            avctx->request_channels = 2;
2103
        }
2104
    }
2105

    
2106
    codec = avcodec_find_decoder(avctx->codec_id);
2107
    avctx->debug_mv = debug_mv;
2108
    avctx->debug = debug;
2109
    avctx->workaround_bugs = workaround_bugs;
2110
    avctx->lowres = lowres;
2111
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2112
    avctx->idct_algo= idct;
2113
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2114
    avctx->skip_frame= skip_frame;
2115
    avctx->skip_idct= skip_idct;
2116
    avctx->skip_loop_filter= skip_loop_filter;
2117
    avctx->error_recognition= error_recognition;
2118
    avctx->error_concealment= error_concealment;
2119
    avcodec_thread_init(avctx, thread_count);
2120

    
2121
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2122

    
2123
    if (!codec ||
2124
        avcodec_open(avctx, codec) < 0)
2125
        return -1;
2126

    
2127
    /* prepare audio output */
2128
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2129
        wanted_spec.freq = avctx->sample_rate;
2130
        wanted_spec.format = AUDIO_S16SYS;
2131
        wanted_spec.channels = avctx->channels;
2132
        wanted_spec.silence = 0;
2133
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2134
        wanted_spec.callback = sdl_audio_callback;
2135
        wanted_spec.userdata = is;
2136
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2137
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2138
            return -1;
2139
        }
2140
        is->audio_hw_buf_size = spec.size;
2141
        is->audio_src_fmt= SAMPLE_FMT_S16;
2142
    }
2143

    
2144
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2145
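    /* per-type setup: record the stream, create its packet queue and start
       the matching consumer (the SDL callback for audio, a dedicated thread
       for video and subtitles) */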
    switch(avctx->codec_type) {
2146
    case CODEC_TYPE_AUDIO:
2147
        is->audio_stream = stream_index;
2148
        is->audio_st = ic->streams[stream_index];
2149
        is->audio_buf_size = 0;
2150
        is->audio_buf_index = 0;
2151

    
2152
        /* init averaging filter */
2153
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2154
        is->audio_diff_avg_count = 0;
2155
        /* since we do not have a precise enough measure of the audio FIFO fullness,
2156
           we correct audio sync only if the error is larger than this threshold */
2157
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2158

    
2159
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2160
        packet_queue_init(&is->audioq);
2161
        SDL_PauseAudio(0);
2162
        break;
2163
    case CODEC_TYPE_VIDEO:
2164
        is->video_stream = stream_index;
2165
        is->video_st = ic->streams[stream_index];
2166

    
2167
//        is->video_current_pts_time = av_gettime();
2168

    
2169
        packet_queue_init(&is->videoq);
2170
        is->video_tid = SDL_CreateThread(video_thread, is);
2171
        break;
2172
    case CODEC_TYPE_SUBTITLE:
2173
        is->subtitle_stream = stream_index;
2174
        is->subtitle_st = ic->streams[stream_index];
2175
        packet_queue_init(&is->subtitleq);
2176

    
2177
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2178
        break;
2179
    default:
2180
        break;
2181
    }
2182
    return 0;
2183
}
2184

    
2185
static void stream_component_close(VideoState *is, int stream_index)
2186
{
2187
    AVFormatContext *ic = is->ic;
2188
    AVCodecContext *avctx;
2189

    
2190
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2191
        return;
2192
    avctx = ic->streams[stream_index]->codec;
2193

    
2194
    switch(avctx->codec_type) {
2195
    case CODEC_TYPE_AUDIO:
2196
        packet_queue_abort(&is->audioq);
2197

    
2198
        SDL_CloseAudio();
2199

    
2200
        packet_queue_end(&is->audioq);
2201
        if (is->reformat_ctx)
2202
            av_audio_convert_free(is->reformat_ctx);
2203
        is->reformat_ctx = NULL;
2204
        break;
2205
    case CODEC_TYPE_VIDEO:
2206
        packet_queue_abort(&is->videoq);
2207

    
2208
        /* note: we also signal this mutex to make sure we deblock the
2209
           video thread in all cases */
2210
        SDL_LockMutex(is->pictq_mutex);
2211
        SDL_CondSignal(is->pictq_cond);
2212
        SDL_UnlockMutex(is->pictq_mutex);
2213

    
2214
        SDL_WaitThread(is->video_tid, NULL);
2215

    
2216
        packet_queue_end(&is->videoq);
2217
        break;
2218
    case CODEC_TYPE_SUBTITLE:
2219
        packet_queue_abort(&is->subtitleq);
2220

    
2221
        /* note: we also signal this mutex to make sure we deblock the
2222
           video thread in all cases */
2223
        SDL_LockMutex(is->subpq_mutex);
2224
        is->subtitle_stream_changed = 1;
2225

    
2226
        SDL_CondSignal(is->subpq_cond);
2227
        SDL_UnlockMutex(is->subpq_mutex);
2228

    
2229
        SDL_WaitThread(is->subtitle_tid, NULL);
2230

    
2231
        packet_queue_end(&is->subtitleq);
2232
        break;
2233
    default:
2234
        break;
2235
    }
2236

    
2237
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2238
    avcodec_close(avctx);
2239
    switch(avctx->codec_type) {
2240
    case CODEC_TYPE_AUDIO:
2241
        is->audio_st = NULL;
2242
        is->audio_stream = -1;
2243
        break;
2244
    case CODEC_TYPE_VIDEO:
2245
        is->video_st = NULL;
2246
        is->video_stream = -1;
2247
        break;
2248
    case CODEC_TYPE_SUBTITLE:
2249
        is->subtitle_st = NULL;
2250
        is->subtitle_stream = -1;
2251
        break;
2252
    default:
2253
        break;
2254
    }
2255
}
2256

    
2257
/* since we have only one decoding thread, we can use a global
2258
   variable instead of a thread local variable */
2259
static VideoState *global_video_state;
2260

    
2261
static int decode_interrupt_cb(void)
2262
{
2263
    return (global_video_state && global_video_state->abort_request);
2264
}
2265

    
2266
/* this thread gets the stream from the disk or the network */
2267
static int decode_thread(void *arg)
2268
{
2269
    VideoState *is = arg;
2270
    AVFormatContext *ic;
2271
    int err, i, ret;
2272
    int st_index[CODEC_TYPE_NB];
2273
    int st_count[CODEC_TYPE_NB]={0};
2274
    int st_best_packet_count[CODEC_TYPE_NB];
2275
    AVPacket pkt1, *pkt = &pkt1;
2276
    AVFormatParameters params, *ap = &params;
2277
    int eof=0;
2278

    
2279
    ic = avformat_alloc_context();
2280

    
2281
    memset(st_index, -1, sizeof(st_index));
2282
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2283
    is->video_stream = -1;
2284
    is->audio_stream = -1;
2285
    is->subtitle_stream = -1;
2286

    
2287
    global_video_state = is;
2288
    url_set_interrupt_cb(decode_interrupt_cb);
2289

    
2290
    memset(ap, 0, sizeof(*ap));
2291

    
2292
    ap->prealloced_context = 1;
2293
    ap->width = frame_width;
2294
    ap->height= frame_height;
2295
    ap->time_base= (AVRational){1, 25};
2296
    ap->pix_fmt = frame_pix_fmt;
2297

    
2298
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2299

    
2300
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2301
    if (err < 0) {
2302
        print_error(is->filename, err);
2303
        ret = -1;
2304
        goto fail;
2305
    }
2306
    is->ic = ic;
2307

    
2308
    if(genpts)
2309
        ic->flags |= AVFMT_FLAG_GENPTS;
2310

    
2311
    err = av_find_stream_info(ic);
2312
    if (err < 0) {
2313
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2314
        ret = -1;
2315
        goto fail;
2316
    }
2317
    if(ic->pb)
2318
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2319

    
2320
    if(seek_by_bytes<0)
2321
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2322

    
2323
    /* if seeking requested, we execute it */
2324
    if (start_time != AV_NOPTS_VALUE) {
2325
        int64_t timestamp;
2326

    
2327
        timestamp = start_time;
2328
        /* add the stream start time */
2329
        if (ic->start_time != AV_NOPTS_VALUE)
2330
            timestamp += ic->start_time;
2331
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2332
        if (ret < 0) {
2333
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2334
                    is->filename, (double)timestamp / AV_TIME_BASE);
2335
        }
2336
    }
2337

    
2338
    for(i = 0; i < ic->nb_streams; i++) {
2339
        AVStream *st= ic->streams[i];
2340
        AVCodecContext *avctx = st->codec;
2341
        ic->streams[i]->discard = AVDISCARD_ALL;
2342
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
2343
            continue;
2344
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2345
            continue;
2346

    
2347
        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2348
            continue;
2349
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2350

    
2351
        switch(avctx->codec_type) {
2352
        case CODEC_TYPE_AUDIO:
2353
            if (!audio_disable)
2354
                st_index[CODEC_TYPE_AUDIO] = i;
2355
            break;
2356
        case CODEC_TYPE_VIDEO:
2357
        case CODEC_TYPE_SUBTITLE:
2358
            if (!video_disable)
2359
                st_index[avctx->codec_type] = i;
2360
            break;
2361
        default:
2362
            break;
2363
        }
2364
    }
2365
    if (show_status) {
2366
        dump_format(ic, 0, is->filename, 0);
2367
    }
2368

    
2369
    /* open the streams */
2370
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
2371
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
2372
    }
2373

    
2374
    ret=-1;
2375
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
2376
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
2377
    }
2378
    if(ret<0) {
2379
        /* add the refresh timer to draw the picture */
2380
        schedule_refresh(is, 40);
2381

    
2382
        if (!display_disable)
2383
            is->show_audio = 2;
2384
    }
2385

    
2386
    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
2387
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
2388
    }
2389

    
2390
    if (is->video_stream < 0 && is->audio_stream < 0) {
2391
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2392
        ret = -1;
2393
        goto fail;
2394
    }
2395

    
2396
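    /* main demuxing loop: honour pause and seek requests, stop reading while
       the packet queues are full, and dispatch each packet to the audio,
       video or subtitle queue */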
    for(;;) {
2397
        if (is->abort_request)
2398
            break;
2399
        if (is->paused != is->last_paused) {
2400
            is->last_paused = is->paused;
2401
            if (is->paused)
2402
                is->read_pause_return= av_read_pause(ic);
2403
            else
2404
                av_read_play(ic);
2405
        }
2406
#if CONFIG_RTSP_DEMUXER
2407
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2408
            /* wait 10 ms to avoid trying to get another packet */
2409
            /* XXX: horrible */
2410
            SDL_Delay(10);
2411
            continue;
2412
        }
2413
#endif
2414
        if (is->seek_req) {
2415
            int64_t seek_target= is->seek_pos;
2416
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2417
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2418
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2419
//      of the seek_pos/seek_rel variables
2420

    
2421
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2422
            if (ret < 0) {
2423
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2424
            }else{
2425
                if (is->audio_stream >= 0) {
2426
                    packet_queue_flush(&is->audioq);
2427
                    packet_queue_put(&is->audioq, &flush_pkt);
2428
                }
2429
                if (is->subtitle_stream >= 0) {
2430
                    packet_queue_flush(&is->subtitleq);
2431
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2432
                }
2433
                if (is->video_stream >= 0) {
2434
                    packet_queue_flush(&is->videoq);
2435
                    packet_queue_put(&is->videoq, &flush_pkt);
2436
                }
2437
            }
2438
            is->seek_req = 0;
2439
            eof= 0;
2440
        }
2441

    
2442
        /* if the queues are full, no need to read more */
2443
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2444
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2445
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2446
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2447
            /* wait 10 ms */
2448
            SDL_Delay(10);
2449
            continue;
2450
        }
2451
        if(url_feof(ic->pb) || eof) {
2452
            if(is->video_stream >= 0){
2453
                av_init_packet(pkt);
2454
                pkt->data=NULL;
2455
                pkt->size=0;
2456
                pkt->stream_index= is->video_stream;
2457
                packet_queue_put(&is->videoq, pkt);
2458
            }
2459
            SDL_Delay(10);
2460
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2461
                ret=AVERROR_EOF;
2462
                goto fail;
2463
            }
2464
            continue;
2465
        }
2466
        ret = av_read_frame(ic, pkt);
2467
        if (ret < 0) {
2468
            if (ret == AVERROR_EOF)
2469
                eof=1;
2470
            if (url_ferror(ic->pb))
2471
                break;
2472
            SDL_Delay(100); /* wait for user event */
2473
            continue;
2474
        }
2475
        if (pkt->stream_index == is->audio_stream) {
2476
            packet_queue_put(&is->audioq, pkt);
2477
        } else if (pkt->stream_index == is->video_stream) {
2478
            packet_queue_put(&is->videoq, pkt);
2479
        } else if (pkt->stream_index == is->subtitle_stream) {
2480
            packet_queue_put(&is->subtitleq, pkt);
2481
        } else {
2482
            av_free_packet(pkt);
2483
        }
2484
    }
2485
    /* wait until the end */
2486
    while (!is->abort_request) {
2487
        SDL_Delay(100);
2488
    }
2489

    
2490
    ret = 0;
2491
 fail:
2492
    /* disable interrupting */
2493
    global_video_state = NULL;
2494

    
2495
    /* close each stream */
2496
    if (is->audio_stream >= 0)
2497
        stream_component_close(is, is->audio_stream);
2498
    if (is->video_stream >= 0)
2499
        stream_component_close(is, is->video_stream);
2500
    if (is->subtitle_stream >= 0)
2501
        stream_component_close(is, is->subtitle_stream);
2502
    if (is->ic) {
2503
        av_close_input_file(is->ic);
2504
        is->ic = NULL; /* safety */
2505
    }
2506
    url_set_interrupt_cb(NULL);
2507

    
2508
    if (ret != 0) {
2509
        SDL_Event event;
2510

    
2511
        event.type = FF_QUIT_EVENT;
2512
        event.user.data1 = is;
2513
        SDL_PushEvent(&event);
2514
    }
2515
    return 0;
2516
}
2517

    
2518
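/* allocate and initialize a VideoState: copy the file name, create the
 * picture/subpicture mutexes and condition variables and spawn the demuxing
 * thread (decode_thread); returns NULL on failure */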
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2519
{
2520
    VideoState *is;
2521

    
2522
    is = av_mallocz(sizeof(VideoState));
2523
    if (!is)
2524
        return NULL;
2525
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2526
    is->iformat = iformat;
2527
    is->ytop = 0;
2528
    is->xleft = 0;
2529

    
2530
    /* start video display */
2531
    is->pictq_mutex = SDL_CreateMutex();
2532
    is->pictq_cond = SDL_CreateCond();
2533

    
2534
    is->subpq_mutex = SDL_CreateMutex();
2535
    is->subpq_cond = SDL_CreateCond();
2536

    
2537
    is->av_sync_type = av_sync_type;
2538
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2539
    if (!is->parse_tid) {
2540
        av_free(is);
2541
        return NULL;
2542
    }
2543
    return is;
2544
}
2545

    
2546
static void stream_close(VideoState *is)
2547
{
2548
    VideoPicture *vp;
2549
    int i;
2550
    /* XXX: use a special url_shutdown call to abort parse cleanly */
2551
    is->abort_request = 1;
2552
    SDL_WaitThread(is->parse_tid, NULL);
2553

    
2554
    /* free all pictures */
2555
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2556
        vp = &is->pictq[i];
2557
#if CONFIG_AVFILTER
2558
        if (vp->picref) {
2559
            avfilter_unref_pic(vp->picref);
2560
            vp->picref = NULL;
2561
        }
2562
#endif
2563
        if (vp->bmp) {
2564
            SDL_FreeYUVOverlay(vp->bmp);
2565
            vp->bmp = NULL;
2566
        }
2567
    }
2568
    SDL_DestroyMutex(is->pictq_mutex);
2569
    SDL_DestroyCond(is->pictq_cond);
2570
    SDL_DestroyMutex(is->subpq_mutex);
2571
    SDL_DestroyCond(is->subpq_cond);
2572
#if !CONFIG_AVFILTER
2573
    if (is->img_convert_ctx)
2574
        sws_freeContext(is->img_convert_ctx);
2575
#endif
2576
    av_free(is);
2577
}
2578

    
2579
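/* switch to the next stream of the given type, wrapping around the stream
 * list; for subtitles the search may also end on "no stream" (index -1) */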
static void stream_cycle_channel(VideoState *is, int codec_type)
2580
{
2581
    AVFormatContext *ic = is->ic;
2582
    int start_index, stream_index;
2583
    AVStream *st;
2584

    
2585
    if (codec_type == CODEC_TYPE_VIDEO)
2586
        start_index = is->video_stream;
2587
    else if (codec_type == CODEC_TYPE_AUDIO)
2588
        start_index = is->audio_stream;
2589
    else
2590
        start_index = is->subtitle_stream;
2591
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2592
        return;
2593
    stream_index = start_index;
2594
    for(;;) {
2595
        if (++stream_index >= is->ic->nb_streams)
2596
        {
2597
            if (codec_type == CODEC_TYPE_SUBTITLE)
2598
            {
2599
                stream_index = -1;
2600
                goto the_end;
2601
            } else
2602
                stream_index = 0;
2603
        }
2604
        if (stream_index == start_index)
2605
            return;
2606
        st = ic->streams[stream_index];
2607
        if (st->codec->codec_type == codec_type) {
2608
            /* check that parameters are OK */
2609
            switch(codec_type) {
2610
            case CODEC_TYPE_AUDIO:
2611
                if (st->codec->sample_rate != 0 &&
2612
                    st->codec->channels != 0)
2613
                    goto the_end;
2614
                break;
2615
            case CODEC_TYPE_VIDEO:
2616
            case CODEC_TYPE_SUBTITLE:
2617
                goto the_end;
2618
            default:
2619
                break;
2620
            }
2621
        }
2622
    }
2623
 the_end:
2624
    stream_component_close(is, start_index);
2625
    stream_component_open(is, stream_index);
2626
}
2627

    
2628

    
2629
static void toggle_full_screen(void)
2630
{
2631
    is_full_screen = !is_full_screen;
2632
    if (!fs_screen_width) {
2633
        /* use default SDL method */
2634
//        SDL_WM_ToggleFullScreen(screen);
2635
    }
2636
    video_open(cur_stream);
2637
}
2638

    
2639
static void toggle_pause(void)
2640
{
2641
    if (cur_stream)
2642
        stream_pause(cur_stream);
2643
    step = 0;
2644
}
2645

    
2646
static void step_to_next_frame(void)
2647
{
2648
    if (cur_stream) {
2649
        /* if the stream is paused, unpause it, then step */
2650
        if (cur_stream->paused)
2651
            stream_pause(cur_stream);
2652
    }
2653
    step = 1;
2654
}
2655

    
2656
static void do_exit(void)
2657
{
2658
    int i;
2659
    if (cur_stream) {
2660
        stream_close(cur_stream);
2661
        cur_stream = NULL;
2662
    }
2663
    for (i = 0; i < CODEC_TYPE_NB; i++)
2664
        av_free(avcodec_opts[i]);
2665
    av_free(avformat_opts);
2666
    av_free(sws_opts);
2667
#if CONFIG_AVFILTER
2668
    avfilter_uninit();
2669
#endif
2670
    if (show_status)
2671
        printf("\n");
2672
    SDL_Quit();
2673
    exit(0);
2674
}
2675

    
2676
static void toggle_audio_display(void)
2677
{
2678
    if (cur_stream) {
2679
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2680
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2681
        fill_rectangle(screen,
2682
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2683
                    bgcolor);
2684
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2685
    }
2686
}
2687

    
2688
/* handle an event sent by the GUI */
2689
static void event_loop(void)
2690
{
2691
    SDL_Event event;
2692
    double incr, pos, frac;
2693

    
2694
    for(;;) {
2695
        double x;
2696
        SDL_WaitEvent(&event);
2697
        switch(event.type) {
2698
        case SDL_KEYDOWN:
2699
            switch(event.key.keysym.sym) {
2700
            case SDLK_ESCAPE:
2701
            case SDLK_q:
2702
                do_exit();
2703
                break;
2704
            case SDLK_f:
2705
                toggle_full_screen();
2706
                break;
2707
            case SDLK_p:
2708
            case SDLK_SPACE:
2709
                toggle_pause();
2710
                break;
2711
            case SDLK_s: //S: Step to next frame
2712
                step_to_next_frame();
2713
                break;
2714
            case SDLK_a:
2715
                if (cur_stream)
2716
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2717
                break;
2718
            case SDLK_v:
2719
                if (cur_stream)
2720
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2721
                break;
2722
            case SDLK_t:
2723
                if (cur_stream)
2724
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2725
                break;
2726
            case SDLK_w:
2727
                toggle_audio_display();
2728
                break;
2729
            case SDLK_LEFT:
2730
                incr = -10.0;
2731
                goto do_seek;
2732
            case SDLK_RIGHT:
2733
                incr = 10.0;
2734
                goto do_seek;
2735
            case SDLK_UP:
2736
                incr = 60.0;
2737
                goto do_seek;
2738
            case SDLK_DOWN:
2739
                incr = -60.0;
2740
            do_seek:
2741
                if (cur_stream) {
2742
                    if (seek_by_bytes) {
2743
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2744
                            pos= cur_stream->video_current_pos;
2745
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2746
                            pos= cur_stream->audio_pkt.pos;
2747
                        }else
2748
                            pos = url_ftell(cur_stream->ic->pb);
2749
                        if (cur_stream->ic->bit_rate)
2750
                            incr *= cur_stream->ic->bit_rate / 8.0;
2751
                        else
2752
                            incr *= 180000.0;
2753
                        pos += incr;
2754
                        stream_seek(cur_stream, pos, incr, 1);
2755
                    } else {
2756
                        pos = get_master_clock(cur_stream);
2757
                        pos += incr;
2758
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2759
                    }
2760
                }
2761
                break;
2762
            default:
2763
                break;
2764
            }
2765
            break;
2766
        case SDL_MOUSEBUTTONDOWN:
2767
        case SDL_MOUSEMOTION:
2768
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2769
                x= event.button.x;
2770
            }else{
2771
                if(event.motion.state != SDL_PRESSED)
2772
                    break;
2773
                x= event.motion.x;
2774
            }
2775
            if (cur_stream) {
2776
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2777
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2778
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2779
                }else{
2780
                    int64_t ts;
2781
                    int ns, hh, mm, ss;
2782
                    int tns, thh, tmm, tss;
2783
                    tns = cur_stream->ic->duration/1000000LL;
2784
                    thh = tns/3600;
2785
                    tmm = (tns%3600)/60;
2786
                    tss = (tns%60);
2787
                    frac = x/cur_stream->width;
2788
                    ns = frac*tns;
2789
                    hh = ns/3600;
2790
                    mm = (ns%3600)/60;
2791
                    ss = (ns%60);
2792
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2793
                            hh, mm, ss, thh, tmm, tss);
2794
                    ts = frac*cur_stream->ic->duration;
2795
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2796
                        ts += cur_stream->ic->start_time;
2797
                    stream_seek(cur_stream, ts, 0, 0);
2798
                }
2799
            }
2800
            break;
2801
        case SDL_VIDEORESIZE:
2802
            if (cur_stream) {
2803
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2804
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2805
                screen_width = cur_stream->width = event.resize.w;
2806
                screen_height= cur_stream->height= event.resize.h;
2807
            }
2808
            break;
2809
        case SDL_QUIT:
2810
        case FF_QUIT_EVENT:
2811
            do_exit();
2812
            break;
2813
        case FF_ALLOC_EVENT:
2814
            video_open(event.user.data1);
2815
            alloc_picture(event.user.data1);
2816
            break;
2817
        case FF_REFRESH_EVENT:
2818
            video_refresh_timer(event.user.data1);
2819
            break;
2820
        default:
2821
            break;
2822
        }
2823
    }
2824
}
2825

    
2826
static void opt_frame_size(const char *arg)
2827
{
2828
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2829
        fprintf(stderr, "Incorrect frame size\n");
2830
        exit(1);
2831
    }
2832
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2833
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2834
        exit(1);
2835
    }
2836
}
2837

    
2838
static int opt_width(const char *opt, const char *arg)
2839
{
2840
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2841
    return 0;
2842
}
2843

    
2844
static int opt_height(const char *opt, const char *arg)
2845
{
2846
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2847
    return 0;
2848
}
2849

    
2850
static void opt_format(const char *arg)
2851
{
2852
    file_iformat = av_find_input_format(arg);
2853
    if (!file_iformat) {
2854
        fprintf(stderr, "Unknown input format: %s\n", arg);
2855
        exit(1);
2856
    }
2857
}
2858

    
2859
static void opt_frame_pix_fmt(const char *arg)
2860
{
2861
    frame_pix_fmt = av_get_pix_fmt(arg);
2862
}
2863

    
2864
static int opt_sync(const char *opt, const char *arg)
2865
{
2866
    if (!strcmp(arg, "audio"))
2867
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2868
    else if (!strcmp(arg, "video"))
2869
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2870
    else if (!strcmp(arg, "ext"))
2871
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2872
    else {
2873
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2874
        exit(1);
2875
    }
2876
    return 0;
2877
}
2878

    
2879
static int opt_seek(const char *opt, const char *arg)
2880
{
2881
    start_time = parse_time_or_die(opt, arg, 1);
2882
    return 0;
2883
}
2884

    
2885
static int opt_debug(const char *opt, const char *arg)
2886
{
2887
    av_log_set_level(99);
2888
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2889
    return 0;
2890
}
2891

    
2892
static int opt_vismv(const char *opt, const char *arg)
2893
{
2894
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2895
    return 0;
2896
}
2897

    
2898
static int opt_thread_count(const char *opt, const char *arg)
2899
{
2900
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2901
#if !HAVE_THREADS
2902
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2903
#endif
2904
    return 0;
2905
}
2906

    
2907
static const OptionDef options[] = {
2908
#include "cmdutils_common_opts.h"
2909
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2910
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2911
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2912
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2913
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2914
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2915
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2916
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2917
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2918
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2919
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2920
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2921
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2922
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2923
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2924
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2925
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2926
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2927
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2928
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2929
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2930
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2931
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2932
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2933
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2934
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2935
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2936
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2937
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2938
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2939
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2940
#if CONFIG_AVFILTER
2941
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2942
#endif
2943
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2944
    { NULL, },
2945
};
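/* Illustrative command lines for the options above (file names are just
 * examples):
 *
 *   ffplay -ss 30 -autoexit input.mkv       start 30 seconds in, quit at the end
 *   ffplay -sync video -threads 2 clip.avi  sync to the video clock, 2 decode threads
 *   ffplay -an -vst 1 input.ts              no audio, pick the second video stream
 */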
2946

    
2947
static void show_usage(void)
2948
{
2949
    printf("Simple media player\n");
2950
    printf("usage: ffplay [options] input_file\n");
2951
    printf("\n");
2952
}
2953

    
2954
static void show_help(void)
2955
{
2956
    show_usage();
2957
    show_help_options(options, "Main options:\n",
2958
                      OPT_EXPERT, 0);
2959
    show_help_options(options, "\nAdvanced options:\n",
2960
                      OPT_EXPERT, OPT_EXPERT);
2961
    printf("\nWhile playing:\n"
2962
           "q, ESC              quit\n"
2963
           "f                   toggle full screen\n"
2964
           "p, SPC              pause\n"
2965
           "a                   cycle audio channel\n"
2966
           "v                   cycle video channel\n"
2967
           "t                   cycle subtitle channel\n"
2968
           "w                   show audio waves\n"
2969
           "left/right          seek backward/forward 10 seconds\n"
2970
           "down/up             seek backward/forward 1 minute\n"
2971
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2972
           );
2973
}
2974

    
2975
static void opt_input_file(const char *filename)
2976
{
2977
    if (input_filename) {
2978
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2979
                filename, input_filename);
2980
        exit(1);
2981
    }
2982
    if (!strcmp(filename, "-"))
2983
        filename = "pipe:";
2984
    input_filename = filename;
2985
}
2986

    
2987
/* program entry point */
2988
int main(int argc, char **argv)
2989
{
2990
    int flags, i;
2991

    
2992
    /* register all codecs, demuxers and protocols */
2993
    avcodec_register_all();
2994
    avdevice_register_all();
2995
#if CONFIG_AVFILTER
2996
    avfilter_register_all();
2997
#endif
2998
    av_register_all();
2999

    
3000
    for(i=0; i<CODEC_TYPE_NB; i++){
3001
        avcodec_opts[i]= avcodec_alloc_context2(i);
3002
    }
3003
    avformat_opts = avformat_alloc_context();
3004
#if !CONFIG_AVFILTER
3005
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3006
#endif
3007

    
3008
    show_banner();
3009

    
3010
    parse_options(argc, argv, options, opt_input_file);
3011

    
3012
    if (!input_filename) {
3013
        show_usage();
3014
        fprintf(stderr, "An input file must be specified\n");
3015
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3016
        exit(1);
3017
    }
3018

    
3019
    if (display_disable) {
3020
        video_disable = 1;
3021
    }
3022
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3023
#if !defined(__MINGW32__) && !defined(__APPLE__)
3024
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3025
#endif
3026
    if (SDL_Init (flags)) {
3027
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3028
        exit(1);
3029
    }
3030

    
3031
    if (!display_disable) {
3032
#if HAVE_SDL_VIDEO_SIZE
3033
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3034
        fs_screen_width = vi->current_w;
3035
        fs_screen_height = vi->current_h;
3036
#endif
3037
    }
3038

    
3039
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3040
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3041
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3042

    
3043
    av_init_packet(&flush_pkt);
3044
    flush_pkt.data= "FLUSH";
3045

    
3046
    cur_stream = stream_open(input_filename, file_iformat);
3047

    
3048
    event_loop();
3049

    
3050
    /* never returns */
3051

    
3052
    return 0;
3053
}