Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 60f198a7

History | View | Annotate | Download (95.4 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <inttypes.h>
24
#include <math.h>
25
#include <limits.h>
26
#include "libavutil/avstring.h"
27
#include "libavutil/pixdesc.h"
28
#include "libavformat/avformat.h"
29
#include "libavdevice/avdevice.h"
30
#include "libswscale/swscale.h"
31
#include "libavcodec/audioconvert.h"
32
#include "libavcodec/colorspace.h"
33
#include "libavcodec/opt.h"
34
#include "libavcodec/avfft.h"
35

    
36
#if CONFIG_AVFILTER
37
# include "libavfilter/avfilter.h"
38
# include "libavfilter/avfiltergraph.h"
39
# include "libavfilter/graphparser.h"
40
#endif
41

    
42
#include "cmdutils.h"
43

    
44
#include <SDL.h>
45
#include <SDL_thread.h>
46

    
47
#ifdef __MINGW32__
48
#undef main /* We don't want SDL to override our main() */
49
#endif
50

    
51
#include <unistd.h>
52
#include <assert.h>
53

    
54
/* Identity reported by cmdutils (banner, -version). */
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* upper bound on bytes buffered across all packet queues before the
   read thread pauses */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep at least this many bytes queued for audio */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* minimum number of queued packets before playback starts */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* weight of one skipped frame in the running frame-skip estimate */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

#if !CONFIG_AVFILTER
/* scaler flags used when converting decoded frames for display */
static int sws_flags = SWS_BICUBIC;
#endif
86

    
87
/* Thread-safe FIFO of demuxed AVPackets, shared between the read thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list head/tail */
    int nb_packets;                      /* number of queued packets */
    int size;                            /* total payload + node bytes queued */
    int abort_request;                   /* set to make blocked readers return */
    SDL_mutex *mutex;                    /* protects all fields above */
    SDL_cond *cond;                      /* signalled on put and on abort */
} PacketQueue;
95

    
96
/* capacities of the decoded-picture and subtitle FIFOs in VideoState */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4
98

    
99
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL YUV overlay holding the pixels */
    int width, height; /* source height & width */
    int allocated;                               /* nonzero once bmp has been allocated */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      /* filter-graph reference backing this frame */
#endif
} VideoPicture;
112

    
113
/* One decoded subtitle queued for blending onto video frames. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
117

    
118
/* Which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
123

    
124
/* Complete state of one open media file: demuxer, per-stream decoders,
   queues, clocks and display geometry. One instance per played file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux/read thread */
    SDL_Thread *video_tid;    /* video decode thread */
    SDL_Thread *refresh_tid;  /* display refresh thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* tells all threads to quit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    /* stream indices (-1 when the stream type is not open) */
    int audio_stream;

    int av_sync_type;         /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio --- */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* --- audio visualization --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* ring buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;   /* FFT context for the spectrum display */
    int rdft_bits;
    int xpos;            /* current x column of the spectrum display */

    /* --- subtitles --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* display geometry within the SDL surface */

    /* timestamp-discontinuity bookkeeping used to choose pts vs dts */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    /* frame-dropping state */
    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
220

    
221
/* forward declarations (defined later in this file) */
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* stream index requested per media type; -1 = no specific stream requested */
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;  /* -1 = decide automatically */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;  /* -1 = decide automatically */
static int autoexit;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;  /* spectrum display refresh interval — presumably ms, confirm at use site */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

/* sentinel packet; packet_queue_put() treats it specially (never duplicated) */
static AVPacket flush_pkt;

/* custom SDL events posted between threads */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285

    
286
/* packet queue handling */
287
static void packet_queue_init(PacketQueue *q)
288
{
289
    memset(q, 0, sizeof(PacketQueue));
290
    q->mutex = SDL_CreateMutex();
291
    q->cond = SDL_CreateCond();
292
    packet_queue_put(q, &flush_pkt);
293
}
294

    
295
/* Discard every queued packet and reset the counters; the queue itself
   stays usable afterwards. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        AVPacketList *next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->last_pkt  = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}
311

    
312
/* Tear the queue down completely: drop pending packets, then destroy
   the SDL mutex and condition variable. */
static void packet_queue_end(PacketQueue *pq)
{
    packet_queue_flush(pq);
    SDL_DestroyMutex(pq->mutex);
    SDL_DestroyCond(pq->cond);
}
318

    
319
/* Append a packet to the queue and wake one waiting consumer.
   Returns 0 on success, -1 on allocation/duplication failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* take ownership of the payload, except for the flush sentinel */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
350

    
351
/* Mark the queue as aborted and wake any consumer blocked in
   packet_queue_get() so it can return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
361

    
362
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364
{
365
    AVPacketList *pkt1;
366
    int ret;
367

    
368
    SDL_LockMutex(q->mutex);
369

    
370
    for(;;) {
371
        if (q->abort_request) {
372
            ret = -1;
373
            break;
374
        }
375

    
376
        pkt1 = q->first_pkt;
377
        if (pkt1) {
378
            q->first_pkt = pkt1->next;
379
            if (!q->first_pkt)
380
                q->last_pkt = NULL;
381
            q->nb_packets--;
382
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
383
            *pkt = pkt1->pkt;
384
            av_free(pkt1);
385
            ret = 1;
386
            break;
387
        } else if (!block) {
388
            ret = 0;
389
            break;
390
        } else {
391
            SDL_CondWait(q->cond, q->mutex);
392
        }
393
    }
394
    SDL_UnlockMutex(q->mutex);
395
    return ret;
396
}
397

    
398
/* Fill a solid rectangle on the given SDL surface. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { .x = x, .y = y, .w = w, .h = h };
    SDL_FillRect(screen, &rect, color);
}
408

    
409
#if 0
410
/* draw only the border of a rectangle */
411
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
412
{
413
    int w1, w2, h1, h2;
414

415
    /* fill the background */
416
    w1 = x;
417
    if (w1 < 0)
418
        w1 = 0;
419
    w2 = s->width - (x + w);
420
    if (w2 < 0)
421
        w2 = 0;
422
    h1 = y;
423
    if (h1 < 0)
424
        h1 = 0;
425
    h2 = s->height - (y + h);
426
    if (h2 < 0)
427
        h2 = 0;
428
    fill_rectangle(screen,
429
                   s->xleft, s->ytop,
430
                   w1, s->height,
431
                   color);
432
    fill_rectangle(screen,
433
                   s->xleft + s->width - w2, s->ytop,
434
                   w2, s->height,
435
                   color);
436
    fill_rectangle(screen,
437
                   s->xleft + w1, s->ytop,
438
                   s->width - w1 - w2, h1,
439
                   color);
440
    fill_rectangle(screen,
441
                   s->xleft + w1, s->ytop + s->height - h2,
442
                   s->width - w1 - w2, h2,
443
                   color);
444
}
445
#endif
446

    
447
/* Alpha-blend 'newp' over 'oldp' with alpha a (0..255); s is the number of
   extra fixed-point bits carried by the accumulated sample. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack one 32-bit ARGB word at s into r/g/b/a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it
   into y/u/v/a (palette already converted to YUV — see blend_subrect). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y/u/v/a into one 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per palettized source pixel */
#define BPP 1
475

    
476
/* Alpha-blend one palettized subtitle rectangle onto a YUV420P picture.
 *
 * dst:  destination picture (YUV420P layout: full-res luma, half-res chroma)
 * rect: subtitle rectangle; pict.data[0] holds 8-bit palette indices and
 *       pict.data[1] the palette, already converted to YUV (see comment below)
 * imgw, imgh: destination dimensions used to clip the rectangle
 *
 * Because chroma is subsampled 2x2, the code processes pixels in 2x2 groups,
 * averaging the chroma/alpha of the group (accumulated in u1/v1/a1) before
 * blending, with dedicated paths for odd start offsets and odd width/height.
 *
 * FIX: in the odd-height tail the 2-pixel chroma blend used the single-pixel
 * u/v instead of the accumulated u1/v1, unlike the identical odd-start-row
 * loop above — the horizontal average was computed but never applied. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* odd starting row: blend a single luma row, chroma at half weight */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: full 2x2 pixel groups */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* fixed: blend the averaged chroma u1/v1 (was u/v, dropping
               the first pixel's contribution) */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
675

    
676
/* Free every rectangle owned by a SubPicture, then the rectangle array
   itself, and zero the AVSubtitle so it can be reused. */
static void free_subpicture(SubPicture *sp)
{
    int idx;

    for (idx = 0; idx < sp->sub.num_rects; idx++) {
        AVSubtitleRect *r = sp->sub.rects[idx];
        av_freep(&r->pict.data[0]);   /* palette indices */
        av_freep(&r->pict.data[1]);   /* palette */
        av_freep(&sp->sub.rects[idx]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
691

    
692
/* Display the picture at the read index of the picture queue: compute the
   display aspect ratio, blend any pending subtitle onto the overlay, letterbox
   the image inside the window, and show the YUV overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come)
           directly into the overlay pixels */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: planes 1 and 2 are swapped — the overlay
                       presumably stores chroma in the opposite order
                       (V before U); confirm against the overlay format */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected image into the window,
           width and height forced even by the & ~1 masks */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
813

    
814
/* Mathematical modulo: maps a into [0, b) for positive b, even when the
   C '%' operator yields a negative remainder. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r >= 0) ? r : r + b;
}
822

    
823
/* Draw the audio visualization: either a per-channel oscilloscope
   (show_audio == 1) or a scrolling RDFT spectrum column (otherwise).
   Reads recent samples from s->sample_array, compensating the display
   index for the data still sitting in the audio output buffer.
   FIX: time_diff was declared int16_t; it holds a microsecond delta from
   av_gettime(), which overflows 16 bits after ~32 ms and corrupts the
   delay compensation — it must be int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;  /* was int16_t: truncated the microsecond delta */
    int rdft_bits, nb_freq;

    /* smallest power of two with 2^rdft_bits >= 2*height */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing near the nominal start to keep the
               waveform display stable between refreshes */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* oscilloscope mode: one waveform band per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between channel bands */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrum mode: draw one column per call and scroll xpos */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window before the RDFT */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
957

    
958
/* (Re)open the SDL video surface sized for the current output.
 * Size priority: forced fullscreen size > user-forced window size >
 * filter-chain/codec output size > 640x480 fallback.
 * Returns 0 on success, -1 if SDL cannot set the video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size SDL actually gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1007

    
1008
/* display the current picture, if any */
1009
static void video_display(VideoState *is)
1010
{
1011
    if(!screen)
1012
        video_open(cur_stream);
1013
    if (is->audio_st && is->show_audio)
1014
        video_audio_display(is);
1015
    else if (is->video_st)
1016
        video_image_display(is);
1017
}
1018

    
1019
static int refresh_thread(void *opaque)
1020
{
1021
    VideoState *is= opaque;
1022
    while(!is->abort_request){
1023
    SDL_Event event;
1024
    event.type = FF_REFRESH_EVENT;
1025
    event.user.data1 = opaque;
1026
        if(!is->refresh){
1027
            is->refresh=1;
1028
    SDL_PushEvent(&event);
1029
        }
1030
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1031
    }
1032
    return 0;
1033
}
1034

    
1035
/* get the current audio clock value */
1036
static double get_audio_clock(VideoState *is)
1037
{
1038
    double pts;
1039
    int hw_buf_size, bytes_per_sec;
1040
    pts = is->audio_clock;
1041
    hw_buf_size = audio_write_get_buf_size(is);
1042
    bytes_per_sec = 0;
1043
    if (is->audio_st) {
1044
        bytes_per_sec = is->audio_st->codec->sample_rate *
1045
            2 * is->audio_st->codec->channels;
1046
    }
1047
    if (bytes_per_sec)
1048
        pts -= (double)hw_buf_size / bytes_per_sec;
1049
    return pts;
1050
}
1051

    
1052
/* get the current video clock value */
1053
static double get_video_clock(VideoState *is)
1054
{
1055
    if (is->paused) {
1056
        return is->video_current_pts;
1057
    } else {
1058
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1059
    }
1060
}
1061

    
1062
/* get the current external clock value */
1063
static double get_external_clock(VideoState *is)
1064
{
1065
    int64_t ti;
1066
    ti = av_gettime();
1067
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1068
}
1069

    
1070
/* get the current master clock value */
1071
static double get_master_clock(VideoState *is)
1072
{
1073
    double val;
1074

    
1075
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1076
        if (is->video_st)
1077
            val = get_video_clock(is);
1078
        else
1079
            val = get_audio_clock(is);
1080
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1081
        if (is->audio_st)
1082
            val = get_audio_clock(is);
1083
        else
1084
            val = get_video_clock(is);
1085
    } else {
1086
        val = get_external_clock(is);
1087
    }
1088
    return val;
1089
}
1090

    
1091
/* seek in the stream */
1092
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1093
{
1094
    if (!is->seek_req) {
1095
        is->seek_pos = pos;
1096
        is->seek_rel = rel;
1097
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1098
        if (seek_by_bytes)
1099
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1100
        is->seek_req = 1;
1101
    }
1102
}
1103

    
1104
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance the frame timer by the time spent paused so
           the next frame is not treated as hopelessly late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* if the demuxer honoured av_read_pause(), re-anchor the current
           video pts to "now" before recomputing the drift */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1116

    
1117
/* Compute the absolute time at which the frame with pts
 * frame_current_pts should be displayed, advancing the running frame
 * timer.  When video is not the master clock, the nominal inter-frame
 * delay is zeroed or doubled to drop/repeat frames and resync.
 *
 * Fix: the DEBUG_SYNC printf referenced an undeclared 'actual_delay'
 * (compile error whenever DEBUG_SYNC was defined) and read 'diff'
 * which was uninitialized when the sync-correction branch was not
 * taken; 'diff' is now zero-initialized and the printf only uses
 * declared variables. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
           delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1157

    
1158
/* Called (via FF_REFRESH_EVENT) to display each frame: dequeues the
 * next picture when its target time has arrived, drops late frames,
 * expires subtitles, and periodically prints the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* too early: keep the picture queued and return */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* already past the *next* frame's time: raise the skip ratio
               and possibly drop this frame outright */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* new subtitle stream: discard every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subpicture once its display window has
                           ended or the next one's window has begun */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1296

    
1297
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* geometry and format come from the filter-chain output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake up queue_picture(), which is waiting on vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1332

    
1333
/**
 * Copy src_frame into the next free slot of the picture queue (YUV
 * overlay), converting pixel format if needed, and stamp it with its
 * display target time.  Blocks while the queue is full; the actual
 * overlay (re)allocation is delegated to the main thread via
 * FF_ALLOC_EVENT.  Returns 0 on success, -1 on abort.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while the display side is idle: decoding outruns
       display, so raise the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores planes as Y,V,U while AVPicture is
           Y,U,V: hence the swapped [1]/[2] indices below */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1454

    
1455
/**
 * compute the exact PTS for the picture if it is omitted in the stream,
 * keep the per-stream video clock updated, and queue the picture.
 * Returns queue_picture()'s result (0 on success, -1 on abort).
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no pts given: guess it from the running clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1484

    
1485
/* Fetch one packet from the video queue and decode it.
 * Returns -1 on abort, 1 when a displayable (non-skipped) frame is in
 * *frame with its chosen timestamp in *pts, and 0 otherwise (flush
 * packet handled, no picture yet, or frame skipped). */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* flush packet (after a seek): reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotone dts/pts so we can pick the more
               trustworthy timestamp source below */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when pts looks less
           faulty than dts, or when dts is missing; else fall back to
           dts, else 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame-skip accounting: deliver only 1 of every skip_frames */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1556

    
1557
#if CONFIG_AVFILTER
1558
/* Private context of the ffplay source filter: the owning player state
 * plus a scratch frame that decoded pictures land in. */
typedef struct {
    VideoState *is;
    AVFrame *frame;
} FilterPriv;
1562

    
1563
/* init callback of the ffplay source filter: store the VideoState
 * passed through opaque and allocate the scratch decode frame.
 * Returns 0 on success, -1 when opaque is missing or allocation fails
 * (the original left priv->frame NULL on OOM, which would crash later
 * in input_request_frame). */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    if(!opaque) return -1;

    priv->is = opaque;
    priv->frame = avcodec_alloc_frame();
    if (!priv->frame)
        return -1;          /* out of memory */

    return 0;
}
1573

    
1574
/* uninit callback of the source filter: release the scratch frame */
static void input_uninit(AVFilterContext *ctx)
{
    av_free(((FilterPriv *)ctx->priv)->frame);
}
1579

    
1580
/* request_frame callback of the source filter: decode until a
 * displayable frame is produced, copy it into a new picref and push it
 * down the link.  Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* 0 means "no frame yet" (skipped or flush): free and retry */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    /* FIXME: until I figure out how to hook everything up to the codec
     * right, we're just copying the entire frame. */
    picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                    picref->pic->format, link->w, link->h);
    /* NOTE(review): pkt.pos is read below after av_free_packet(&pkt);
       av_free_packet only releases the data buffer, but confirm pos is
       still meaningful here */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);
    avfilter_unref_pic(picref);

    return 0;
}
1610

    
1611
/* Advertise exactly the decoder's pixel format on the filter output. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat fmts[2];

    fmts[0] = priv->is->video_st->codec->pix_fmt;
    fmts[1] = PIX_FMT_NONE;   /* list terminator */

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1621

    
1622
/* Propagate the decoder's frame dimensions to the output link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVCodecContext *dec = priv->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;
    return 0;
}
1632

    
1633
/* Source filter: feeds frames decoded from the video stream into the
 * (optional) user filter graph.  Pure source, hence no input pads. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = CODEC_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1651

    
1652
/* end_frame callback of the sink filter: intentionally a no-op — the
 * picture is pulled explicitly by get_filtered_video_frame() instead */
static void output_end_frame(AVFilterLink *link)
{
}
1655

    
1656
/* The sink accepts only YUV420P, which maps onto SDL's YV12 overlay. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat accepted[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(accepted));
    return 0;
}
1663

    
1664
/* Pull one filtered picture out of the sink filter's input link.
 * On success returns 1 with frame data/linesize pointing into the
 * picref (ownership of which passes to the caller via frame->opaque)
 * and *pts/*pos filled in; returns -1 when no picture is available. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take the reference off the link so it is not freed under us */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1684

    
1685
/* Sink filter at the end of the chain; frames are fetched from its
 * input link by get_filtered_video_frame().  Pure sink, no outputs. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = CODEC_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1698
#endif  /* CONFIG_AVFILTER */
1699

    
1700
/* Video decoding thread: sets up the (optional) filter graph, then
 * loops decoding frames, converting their pts to seconds and handing
 * them to output_picture2() for queuing.  Returns 0 on exit. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    graph->scale_sws_opts = av_strdup("sws_flags=bilinear");

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* a -vf chain was given: parse it in between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect src directly to the sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* 0 == no displayable frame this round (skipped / flush) */
        if (!ret)
            continue;

        /* stream time-base units -> seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1791

    
1792
/* Subtitle decoding thread: decodes subtitle packets, converts bitmap
 * palettes from RGBA to YUVA, and queues the subpictures for display.
 * Returns 0 on exit. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* flush packet (after a seek): reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture queue */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles: convert the palette */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1866

    
1867
/* copy samples for viewing in editor window */
1868
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1869
{
1870
    int size, len, channels;
1871

    
1872
    channels = is->audio_st->codec->channels;
1873

    
1874
    size = samples_size / sizeof(short);
1875
    while (size > 0) {
1876
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1877
        if (len > size)
1878
            len = size;
1879
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1880
        samples += len;
1881
        is->sample_array_index += len;
1882
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1883
            is->sample_array_index = 0;
1884
        size -= len;
1885
    }
1886
}
1887

    
1888
/* return the new audio buffer size (samples can be added or deleted
1889
   to get better sync if video or external master clock) */
1890
static int synchronize_audio(VideoState *is, short *samples,
1891
                             int samples_size1, double pts)
1892
{
1893
    int n, samples_size;
1894
    double ref_clock;
1895

    
1896
    n = 2 * is->audio_st->codec->channels;
1897
    samples_size = samples_size1;
1898

    
1899
    /* if not master, then we try to remove or add samples to correct the clock */
1900
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1901
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1902
        double diff, avg_diff;
1903
        int wanted_size, min_size, max_size, nb_samples;
1904

    
1905
        ref_clock = get_master_clock(is);
1906
        diff = get_audio_clock(is) - ref_clock;
1907

    
1908
        if (diff < AV_NOSYNC_THRESHOLD) {
1909
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1910
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1911
                /* not enough measures to have a correct estimate */
1912
                is->audio_diff_avg_count++;
1913
            } else {
1914
                /* estimate the A-V difference */
1915
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1916

    
1917
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1918
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1919
                    nb_samples = samples_size / n;
1920

    
1921
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1922
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1923
                    if (wanted_size < min_size)
1924
                        wanted_size = min_size;
1925
                    else if (wanted_size > max_size)
1926
                        wanted_size = max_size;
1927

    
1928
                    /* add or remove samples to correction the synchro */
1929
                    if (wanted_size < samples_size) {
1930
                        /* remove samples */
1931
                        samples_size = wanted_size;
1932
                    } else if (wanted_size > samples_size) {
1933
                        uint8_t *samples_end, *q;
1934
                        int nb;
1935

    
1936
                        /* add samples */
1937
                        nb = (samples_size - wanted_size);
1938
                        samples_end = (uint8_t *)samples + samples_size - n;
1939
                        q = samples_end + n;
1940
                        while (nb > 0) {
1941
                            memcpy(q, samples_end, n);
1942
                            q += n;
1943
                            nb -= n;
1944
                        }
1945
                        samples_size = wanted_size;
1946
                    }
1947
                }
1948
#if 0
1949
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1950
                       diff, avg_diff, samples_size - samples_size1,
1951
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1952
#endif
1953
            }
1954
        } else {
1955
            /* too big difference : may be initial PTS errors, so
1956
               reset A-V filter */
1957
            is->audio_diff_avg_count = 0;
1958
            is->audio_diff_cum = 0;
1959
        }
1960
    }
1961

    
1962
    return samples_size;
1963
}
1964

    
1965
/* decode one audio frame and returns its uncompressed size */
1966
static int audio_decode_frame(VideoState *is, double *pts_ptr)
1967
{
1968
    AVPacket *pkt_temp = &is->audio_pkt_temp;
1969
    AVPacket *pkt = &is->audio_pkt;
1970
    AVCodecContext *dec= is->audio_st->codec;
1971
    int n, len1, data_size;
1972
    double pts;
1973

    
1974
    for(;;) {
1975
        /* NOTE: the audio packet can contain several frames */
1976
        while (pkt_temp->size > 0) {
1977
            data_size = sizeof(is->audio_buf1);
1978
            len1 = avcodec_decode_audio3(dec,
1979
                                        (int16_t *)is->audio_buf1, &data_size,
1980
                                        pkt_temp);
1981
            if (len1 < 0) {
1982
                /* if error, we skip the frame */
1983
                pkt_temp->size = 0;
1984
                break;
1985
            }
1986

    
1987
            pkt_temp->data += len1;
1988
            pkt_temp->size -= len1;
1989
            if (data_size <= 0)
1990
                continue;
1991

    
1992
            if (dec->sample_fmt != is->audio_src_fmt) {
1993
                if (is->reformat_ctx)
1994
                    av_audio_convert_free(is->reformat_ctx);
1995
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1996
                                                         dec->sample_fmt, 1, NULL, 0);
1997
                if (!is->reformat_ctx) {
1998
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1999
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
2000
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
2001
                        break;
2002
                }
2003
                is->audio_src_fmt= dec->sample_fmt;
2004
            }
2005

    
2006
            if (is->reformat_ctx) {
2007
                const void *ibuf[6]= {is->audio_buf1};
2008
                void *obuf[6]= {is->audio_buf2};
2009
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
2010
                int ostride[6]= {2};
2011
                int len= data_size/istride[0];
2012
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2013
                    printf("av_audio_convert() failed\n");
2014
                    break;
2015
                }
2016
                is->audio_buf= is->audio_buf2;
2017
                /* FIXME: existing code assume that data_size equals framesize*channels*2
2018
                          remove this legacy cruft */
2019
                data_size= len*2;
2020
            }else{
2021
                is->audio_buf= is->audio_buf1;
2022
            }
2023

    
2024
            /* if no pts, then compute it */
2025
            pts = is->audio_clock;
2026
            *pts_ptr = pts;
2027
            n = 2 * dec->channels;
2028
            is->audio_clock += (double)data_size /
2029
                (double)(n * dec->sample_rate);
2030
#if defined(DEBUG_SYNC)
2031
            {
2032
                static double last_clock;
2033
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2034
                       is->audio_clock - last_clock,
2035
                       is->audio_clock, pts);
2036
                last_clock = is->audio_clock;
2037
            }
2038
#endif
2039
            return data_size;
2040
        }
2041

    
2042
        /* free the current packet */
2043
        if (pkt->data)
2044
            av_free_packet(pkt);
2045

    
2046
        if (is->paused || is->audioq.abort_request) {
2047
            return -1;
2048
        }
2049

    
2050
        /* read next packet */
2051
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2052
            return -1;
2053
        if(pkt->data == flush_pkt.data){
2054
            avcodec_flush_buffers(dec);
2055
            continue;
2056
        }
2057

    
2058
        pkt_temp->data = pkt->data;
2059
        pkt_temp->size = pkt->size;
2060

    
2061
        /* if update the audio clock with the pts */
2062
        if (pkt->pts != AV_NOPTS_VALUE) {
2063
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2064
        }
2065
    }
2066
}
2067

    
2068
/* get the current audio output buffer size, in samples. With SDL, we
2069
   cannot have a precise information */
2070
static int audio_write_get_buf_size(VideoState *is)
2071
{
2072
    return is->audio_buf_size - is->audio_buf_index;
2073
}
2074

    
2075

    
2076
/* prepare a new audio buffer */
2077
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2078
{
2079
    VideoState *is = opaque;
2080
    int audio_size, len1;
2081
    double pts;
2082

    
2083
    audio_callback_time = av_gettime();
2084

    
2085
    while (len > 0) {
2086
        if (is->audio_buf_index >= is->audio_buf_size) {
2087
           audio_size = audio_decode_frame(is, &pts);
2088
           if (audio_size < 0) {
2089
                /* if error, just output silence */
2090
               is->audio_buf = is->audio_buf1;
2091
               is->audio_buf_size = 1024;
2092
               memset(is->audio_buf, 0, is->audio_buf_size);
2093
           } else {
2094
               if (is->show_audio)
2095
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2096
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2097
                                              pts);
2098
               is->audio_buf_size = audio_size;
2099
           }
2100
           is->audio_buf_index = 0;
2101
        }
2102
        len1 = is->audio_buf_size - is->audio_buf_index;
2103
        if (len1 > len)
2104
            len1 = len;
2105
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2106
        len -= len1;
2107
        stream += len1;
2108
        is->audio_buf_index += len1;
2109
    }
2110
}
2111

    
2112
/* open a given stream. Return 0 if OK */
2113
static int stream_component_open(VideoState *is, int stream_index)
2114
{
2115
    AVFormatContext *ic = is->ic;
2116
    AVCodecContext *avctx;
2117
    AVCodec *codec;
2118
    SDL_AudioSpec wanted_spec, spec;
2119

    
2120
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2121
        return -1;
2122
    avctx = ic->streams[stream_index]->codec;
2123

    
2124
    /* prepare audio output */
2125
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2126
        if (avctx->channels > 0) {
2127
            avctx->request_channels = FFMIN(2, avctx->channels);
2128
        } else {
2129
            avctx->request_channels = 2;
2130
        }
2131
    }
2132

    
2133
    codec = avcodec_find_decoder(avctx->codec_id);
2134
    avctx->debug_mv = debug_mv;
2135
    avctx->debug = debug;
2136
    avctx->workaround_bugs = workaround_bugs;
2137
    avctx->lowres = lowres;
2138
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2139
    avctx->idct_algo= idct;
2140
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2141
    avctx->skip_frame= skip_frame;
2142
    avctx->skip_idct= skip_idct;
2143
    avctx->skip_loop_filter= skip_loop_filter;
2144
    avctx->error_recognition= error_recognition;
2145
    avctx->error_concealment= error_concealment;
2146
    avcodec_thread_init(avctx, thread_count);
2147

    
2148
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2149

    
2150
    if (!codec ||
2151
        avcodec_open(avctx, codec) < 0)
2152
        return -1;
2153

    
2154
    /* prepare audio output */
2155
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2156
        wanted_spec.freq = avctx->sample_rate;
2157
        wanted_spec.format = AUDIO_S16SYS;
2158
        wanted_spec.channels = avctx->channels;
2159
        wanted_spec.silence = 0;
2160
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2161
        wanted_spec.callback = sdl_audio_callback;
2162
        wanted_spec.userdata = is;
2163
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2164
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2165
            return -1;
2166
        }
2167
        is->audio_hw_buf_size = spec.size;
2168
        is->audio_src_fmt= SAMPLE_FMT_S16;
2169
    }
2170

    
2171
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2172
    switch(avctx->codec_type) {
2173
    case CODEC_TYPE_AUDIO:
2174
        is->audio_stream = stream_index;
2175
        is->audio_st = ic->streams[stream_index];
2176
        is->audio_buf_size = 0;
2177
        is->audio_buf_index = 0;
2178

    
2179
        /* init averaging filter */
2180
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2181
        is->audio_diff_avg_count = 0;
2182
        /* since we do not have a precise anough audio fifo fullness,
2183
           we correct audio sync only if larger than this threshold */
2184
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2185

    
2186
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2187
        packet_queue_init(&is->audioq);
2188
        SDL_PauseAudio(0);
2189
        break;
2190
    case CODEC_TYPE_VIDEO:
2191
        is->video_stream = stream_index;
2192
        is->video_st = ic->streams[stream_index];
2193

    
2194
//        is->video_current_pts_time = av_gettime();
2195

    
2196
        packet_queue_init(&is->videoq);
2197
        is->video_tid = SDL_CreateThread(video_thread, is);
2198
        break;
2199
    case CODEC_TYPE_SUBTITLE:
2200
        is->subtitle_stream = stream_index;
2201
        is->subtitle_st = ic->streams[stream_index];
2202
        packet_queue_init(&is->subtitleq);
2203

    
2204
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2205
        break;
2206
    default:
2207
        break;
2208
    }
2209
    return 0;
2210
}
2211

    
2212
/* Tear down a previously opened stream component: abort its packet
 * queue, join its worker thread (audio is handled inside SDL), free
 * auxiliary state and mark the stream slot as closed (-1). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2283

    
2284
/* since we have only one decoding thread, we can use a global
2285
   variable instead of a thread local variable */
2286
static VideoState *global_video_state;
2287

    
2288
static int decode_interrupt_cb(void)
2289
{
2290
    return (global_video_state && global_video_state->abort_request);
2291
}
2292

    
2293
/* this thread gets the stream from the disk or the network */
2294
static int decode_thread(void *arg)
2295
{
2296
    VideoState *is = arg;
2297
    AVFormatContext *ic;
2298
    int err, i, ret;
2299
    int st_index[CODEC_TYPE_NB];
2300
    int st_count[CODEC_TYPE_NB]={0};
2301
    int st_best_packet_count[CODEC_TYPE_NB];
2302
    AVPacket pkt1, *pkt = &pkt1;
2303
    AVFormatParameters params, *ap = &params;
2304
    int eof=0;
2305

    
2306
    ic = avformat_alloc_context();
2307

    
2308
    memset(st_index, -1, sizeof(st_index));
2309
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2310
    is->video_stream = -1;
2311
    is->audio_stream = -1;
2312
    is->subtitle_stream = -1;
2313

    
2314
    global_video_state = is;
2315
    url_set_interrupt_cb(decode_interrupt_cb);
2316

    
2317
    memset(ap, 0, sizeof(*ap));
2318

    
2319
    ap->prealloced_context = 1;
2320
    ap->width = frame_width;
2321
    ap->height= frame_height;
2322
    ap->time_base= (AVRational){1, 25};
2323
    ap->pix_fmt = frame_pix_fmt;
2324

    
2325
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2326

    
2327
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2328
    if (err < 0) {
2329
        print_error(is->filename, err);
2330
        ret = -1;
2331
        goto fail;
2332
    }
2333
    is->ic = ic;
2334

    
2335
    if(genpts)
2336
        ic->flags |= AVFMT_FLAG_GENPTS;
2337

    
2338
    err = av_find_stream_info(ic);
2339
    if (err < 0) {
2340
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2341
        ret = -1;
2342
        goto fail;
2343
    }
2344
    if(ic->pb)
2345
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2346

    
2347
    if(seek_by_bytes<0)
2348
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2349

    
2350
    /* if seeking requested, we execute it */
2351
    if (start_time != AV_NOPTS_VALUE) {
2352
        int64_t timestamp;
2353

    
2354
        timestamp = start_time;
2355
        /* add the stream start time */
2356
        if (ic->start_time != AV_NOPTS_VALUE)
2357
            timestamp += ic->start_time;
2358
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2359
        if (ret < 0) {
2360
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2361
                    is->filename, (double)timestamp / AV_TIME_BASE);
2362
        }
2363
    }
2364

    
2365
    for(i = 0; i < ic->nb_streams; i++) {
2366
        AVStream *st= ic->streams[i];
2367
        AVCodecContext *avctx = st->codec;
2368
        ic->streams[i]->discard = AVDISCARD_ALL;
2369
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
2370
            continue;
2371
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2372
            continue;
2373

    
2374
        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2375
            continue;
2376
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2377

    
2378
        switch(avctx->codec_type) {
2379
        case CODEC_TYPE_AUDIO:
2380
            if (!audio_disable)
2381
                st_index[CODEC_TYPE_AUDIO] = i;
2382
            break;
2383
        case CODEC_TYPE_VIDEO:
2384
        case CODEC_TYPE_SUBTITLE:
2385
            if (!video_disable)
2386
                st_index[avctx->codec_type] = i;
2387
            break;
2388
        default:
2389
            break;
2390
        }
2391
    }
2392
    if (show_status) {
2393
        dump_format(ic, 0, is->filename, 0);
2394
    }
2395

    
2396
    /* open the streams */
2397
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
2398
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
2399
    }
2400

    
2401
    ret=-1;
2402
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
2403
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
2404
    }
2405
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2406
    if(ret<0) {
2407
        if (!display_disable)
2408
            is->show_audio = 2;
2409
    }
2410

    
2411
    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
2412
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
2413
    }
2414

    
2415
    if (is->video_stream < 0 && is->audio_stream < 0) {
2416
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2417
        ret = -1;
2418
        goto fail;
2419
    }
2420

    
2421
    for(;;) {
2422
        if (is->abort_request)
2423
            break;
2424
        if (is->paused != is->last_paused) {
2425
            is->last_paused = is->paused;
2426
            if (is->paused)
2427
                is->read_pause_return= av_read_pause(ic);
2428
            else
2429
                av_read_play(ic);
2430
        }
2431
#if CONFIG_RTSP_DEMUXER
2432
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2433
            /* wait 10 ms to avoid trying to get another packet */
2434
            /* XXX: horrible */
2435
            SDL_Delay(10);
2436
            continue;
2437
        }
2438
#endif
2439
        if (is->seek_req) {
2440
            int64_t seek_target= is->seek_pos;
2441
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2442
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2443
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2444
//      of the seek_pos/seek_rel variables
2445

    
2446
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2447
            if (ret < 0) {
2448
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2449
            }else{
2450
                if (is->audio_stream >= 0) {
2451
                    packet_queue_flush(&is->audioq);
2452
                    packet_queue_put(&is->audioq, &flush_pkt);
2453
                }
2454
                if (is->subtitle_stream >= 0) {
2455
                    packet_queue_flush(&is->subtitleq);
2456
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2457
                }
2458
                if (is->video_stream >= 0) {
2459
                    packet_queue_flush(&is->videoq);
2460
                    packet_queue_put(&is->videoq, &flush_pkt);
2461
                }
2462
            }
2463
            is->seek_req = 0;
2464
            eof= 0;
2465
        }
2466

    
2467
        /* if the queue are full, no need to read more */
2468
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2469
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2470
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2471
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2472
            /* wait 10 ms */
2473
            SDL_Delay(10);
2474
            continue;
2475
        }
2476
        if(url_feof(ic->pb) || eof) {
2477
            if(is->video_stream >= 0){
2478
                av_init_packet(pkt);
2479
                pkt->data=NULL;
2480
                pkt->size=0;
2481
                pkt->stream_index= is->video_stream;
2482
                packet_queue_put(&is->videoq, pkt);
2483
            }
2484
            SDL_Delay(10);
2485
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2486
                if(loop!=1 && (!loop || --loop)){
2487
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2488
                }else if(autoexit){
2489
                    ret=AVERROR_EOF;
2490
                    goto fail;
2491
                }
2492
            }
2493
            continue;
2494
        }
2495
        ret = av_read_frame(ic, pkt);
2496
        if (ret < 0) {
2497
            if (ret == AVERROR_EOF)
2498
                eof=1;
2499
            if (url_ferror(ic->pb))
2500
                break;
2501
            SDL_Delay(100); /* wait for user event */
2502
            continue;
2503
        }
2504
        if (pkt->stream_index == is->audio_stream) {
2505
            packet_queue_put(&is->audioq, pkt);
2506
        } else if (pkt->stream_index == is->video_stream) {
2507
            packet_queue_put(&is->videoq, pkt);
2508
        } else if (pkt->stream_index == is->subtitle_stream) {
2509
            packet_queue_put(&is->subtitleq, pkt);
2510
        } else {
2511
            av_free_packet(pkt);
2512
        }
2513
    }
2514
    /* wait until the end */
2515
    while (!is->abort_request) {
2516
        SDL_Delay(100);
2517
    }
2518

    
2519
    ret = 0;
2520
 fail:
2521
    /* disable interrupting */
2522
    global_video_state = NULL;
2523

    
2524
    /* close each stream */
2525
    if (is->audio_stream >= 0)
2526
        stream_component_close(is, is->audio_stream);
2527
    if (is->video_stream >= 0)
2528
        stream_component_close(is, is->video_stream);
2529
    if (is->subtitle_stream >= 0)
2530
        stream_component_close(is, is->subtitle_stream);
2531
    if (is->ic) {
2532
        av_close_input_file(is->ic);
2533
        is->ic = NULL; /* safety */
2534
    }
2535
    url_set_interrupt_cb(NULL);
2536

    
2537
    if (ret != 0) {
2538
        SDL_Event event;
2539

    
2540
        event.type = FF_QUIT_EVENT;
2541
        event.user.data1 = is;
2542
        SDL_PushEvent(&event);
2543
    }
2544
    return 0;
2545
}
2546

    
2547
/* Allocate and initialize a VideoState for `filename`, create the
 * synchronization primitives and spawn the demux (decode) thread.
 * Returns NULL on allocation or thread-creation failure. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
2574

    
2575
/* Shut down a stream: request abort, join the parse and refresh
 * threads, release all queued pictures and synchronization objects,
 * then free the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2608

    
2609
/* Cycle to the next usable stream of the given media type, wrapping
 * around the stream list.  For subtitles, wrapping past the end first
 * selects "no subtitle" (index -1) before cycling again. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle if the component isn't open (subtitles may be -1) */
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* completed a full lap without finding an alternative */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2657

    
2658

    
2659
static void toggle_full_screen(void)
2660
{
2661
    is_full_screen = !is_full_screen;
2662
    if (!fs_screen_width) {
2663
        /* use default SDL method */
2664
//        SDL_WM_ToggleFullScreen(screen);
2665
    }
2666
    video_open(cur_stream);
2667
}
2668

    
2669
static void toggle_pause(void)
2670
{
2671
    if (cur_stream)
2672
        stream_pause(cur_stream);
2673
    step = 0;
2674
}
2675

    
2676
static void step_to_next_frame(void)
2677
{
2678
    if (cur_stream) {
2679
        /* if the stream is paused unpause it, then step */
2680
        if (cur_stream->paused)
2681
            stream_pause(cur_stream);
2682
    }
2683
    step = 1;
2684
}
2685

    
2686
/* Tear down the player and terminate the process; never returns. */
static void do_exit(void)
{
    int i;
    /* Close the active stream first so its threads stop before globals go away. */
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    /* Release the per-media-type codec option contexts allocated in main(). */
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    /* Finish the status line with a newline so the shell prompt is clean. */
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
2707
{
2708
    if (cur_stream) {
2709
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2710
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2711
        fill_rectangle(screen,
2712
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2713
                    bgcolor);
2714
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2715
    }
2716
}
2717

    
2718
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * FF_* user events posted by the decoder/refresh threads. Only exits
 * via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* Arrow keys: relative seek, in seconds (converted to bytes below
             * when seeking by byte position). */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* Pick the best-known current byte position:
                         * video position, then audio packet, then raw pb offset. */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* Convert the seconds increment to bytes; if the
                         * bitrate is unknown, fall back to a guessed byte rate. */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        /* Mouse click or drag: seek to the fraction of the file width. */
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* Only track motion while a button is held down. */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    /* Total and target positions, split into h:m:s for display. */
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* duration is relative to start_time; offset the target. */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        /* Posted by the video thread: (re)allocate the picture on the
         * main thread, where SDL surface calls are safe. */
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
2858
{
2859
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2860
        fprintf(stderr, "Incorrect frame size\n");
2861
        exit(1);
2862
    }
2863
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2864
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2865
        exit(1);
2866
    }
2867
}
2868

    
2869
static int opt_width(const char *opt, const char *arg)
2870
{
2871
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2872
    return 0;
2873
}
2874

    
2875
static int opt_height(const char *opt, const char *arg)
2876
{
2877
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2878
    return 0;
2879
}
2880

    
2881
static void opt_format(const char *arg)
2882
{
2883
    file_iformat = av_find_input_format(arg);
2884
    if (!file_iformat) {
2885
        fprintf(stderr, "Unknown input format: %s\n", arg);
2886
        exit(1);
2887
    }
2888
}
2889

    
2890
static void opt_frame_pix_fmt(const char *arg)
2891
{
2892
    frame_pix_fmt = av_get_pix_fmt(arg);
2893
}
2894

    
2895
static int opt_sync(const char *opt, const char *arg)
2896
{
2897
    if (!strcmp(arg, "audio"))
2898
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2899
    else if (!strcmp(arg, "video"))
2900
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2901
    else if (!strcmp(arg, "ext"))
2902
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2903
    else {
2904
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2905
        exit(1);
2906
    }
2907
    return 0;
2908
}
2909

    
2910
static int opt_seek(const char *opt, const char *arg)
2911
{
2912
    start_time = parse_time_or_die(opt, arg, 1);
2913
    return 0;
2914
}
2915

    
2916
static int opt_debug(const char *opt, const char *arg)
2917
{
2918
    av_log_set_level(99);
2919
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2920
    return 0;
2921
}
2922

    
2923
static int opt_vismv(const char *opt, const char *arg)
2924
{
2925
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2926
    return 0;
2927
}
2928

    
2929
static int opt_thread_count(const char *opt, const char *arg)
2930
{
2931
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2932
#if !HAVE_THREADS
2933
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2934
#endif
2935
    return 0;
2936
}
2937

    
2938
/* Command-line option table consumed by parse_options() in main(). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* seeking and input */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging / decoder tuning */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}

/* Print full help: usage line, the option table (main then expert
 * entries), and the interactive key bindings. */
static void show_help(void)
{
    show_usage();
    /* mask OPT_EXPERT, value 0: non-expert options only */
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
3011
{
3012
    if (input_filename) {
3013
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3014
                filename, input_filename);
3015
        exit(1);
3016
    }
3017
    if (!strcmp(filename, "-"))
3018
        filename = "pipe:";
3019
    input_filename = filename;
3020
}
3021

    
3022
/* Called from the main */
/* Program entry point: register libav* components, parse options,
 * initialize SDL, open the stream, and enter the event loop. */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    /* Per-media-type codec contexts used to collect AVOption defaults;
     * freed in do_exit(). */
    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
#if !CONFIG_AVFILTER
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
#endif

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    /* No display implies no video decoding at all. */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    /* Remember the desktop size for fullscreen mode, when SDL can report it. */
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* Drop event types the player never handles. */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* Sentinel packet queued to signal the decoders to flush on seek. */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}
