Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 34017fd9

History | View | Annotate | Download (100 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavcore/imgutils.h"
32
#include "libavcore/parseutils.h"
33
#include "libavformat/avformat.h"
34
#include "libavdevice/avdevice.h"
35
#include "libswscale/swscale.h"
36
#include "libavcodec/audioconvert.h"
37
#include "libavcodec/opt.h"
38
#include "libavcodec/avfft.h"
39

    
40
#if CONFIG_AVFILTER
41
# include "libavfilter/avfilter.h"
42
# include "libavfilter/avfiltergraph.h"
43
# include "libavfilter/graphparser.h"
44
#endif
45

    
46
#include "cmdutils.h"
47

    
48
#include <SDL.h>
49
#include <SDL_thread.h>
50

    
51
#ifdef __MINGW32__
52
#undef main /* We don't want SDL to override our main() */
53
#endif
54

    
55
#include <unistd.h>
56
#include <assert.h>
57

    
58
const char program_name[] = "FFplay";
59
const int program_birth_year = 2003;
60

    
61
//#define DEBUG_SYNC
62

    
63
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65
#define MIN_FRAMES 5
66

    
67
/* SDL audio buffer size, in samples. Should be small to have precise
68
   A/V sync as SDL does not have hardware buffer fullness info. */
69
#define SDL_AUDIO_BUFFER_SIZE 1024
70

    
71
/* no AV sync correction is done if below the AV sync threshold */
72
#define AV_SYNC_THRESHOLD 0.01
73
/* no AV correction is done if too big error */
74
#define AV_NOSYNC_THRESHOLD 10.0
75

    
76
#define FRAME_SKIP_FACTOR 0.05
77

    
78
/* maximum audio speed change to get correct sync */
79
#define SAMPLE_CORRECTION_PERCENT_MAX 10
80

    
81
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82
#define AUDIO_DIFF_AVG_NB   20
83

    
84
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
85
#define SAMPLE_ARRAY_SIZE (2*65536)
86

    
87
static int sws_flags = SWS_BICUBIC;
88

    
89
typedef struct PacketQueue {
90
    AVPacketList *first_pkt, *last_pkt;
91
    int nb_packets;
92
    int size;
93
    int abort_request;
94
    SDL_mutex *mutex;
95
    SDL_cond *cond;
96
} PacketQueue;
97

    
98
#define VIDEO_PICTURE_QUEUE_SIZE 2
99
#define SUBPICTURE_QUEUE_SIZE 4
100

    
101
typedef struct VideoPicture {
102
    double pts;                                  ///<presentation time stamp for this picture
103
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
104
    int64_t pos;                                 ///<byte position in file
105
    SDL_Overlay *bmp;
106
    int width, height; /* source height & width */
107
    int allocated;
108
    enum PixelFormat pix_fmt;
109

    
110
#if CONFIG_AVFILTER
111
    AVFilterBufferRef *picref;
112
#endif
113
} VideoPicture;
114

    
115
typedef struct SubPicture {
116
    double pts; /* presentation time stamp for this picture */
117
    AVSubtitle sub;
118
} SubPicture;
119

    
120
/* Which clock drives A/V synchronisation. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
125

    
126
typedef struct VideoState {
127
    SDL_Thread *parse_tid;
128
    SDL_Thread *video_tid;
129
    SDL_Thread *refresh_tid;
130
    AVInputFormat *iformat;
131
    int no_background;
132
    int abort_request;
133
    int paused;
134
    int last_paused;
135
    int seek_req;
136
    int seek_flags;
137
    int64_t seek_pos;
138
    int64_t seek_rel;
139
    int read_pause_return;
140
    AVFormatContext *ic;
141
    int dtg_active_format;
142

    
143
    int audio_stream;
144

    
145
    int av_sync_type;
146
    double external_clock; /* external clock base */
147
    int64_t external_clock_time;
148

    
149
    double audio_clock;
150
    double audio_diff_cum; /* used for AV difference average computation */
151
    double audio_diff_avg_coef;
152
    double audio_diff_threshold;
153
    int audio_diff_avg_count;
154
    AVStream *audio_st;
155
    PacketQueue audioq;
156
    int audio_hw_buf_size;
157
    /* samples output by the codec. we reserve more space for avsync
158
       compensation */
159
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161
    uint8_t *audio_buf;
162
    unsigned int audio_buf_size; /* in bytes */
163
    int audio_buf_index; /* in bytes */
164
    AVPacket audio_pkt_temp;
165
    AVPacket audio_pkt;
166
    enum SampleFormat audio_src_fmt;
167
    AVAudioConvert *reformat_ctx;
168

    
169
    int show_audio; /* if true, display audio samples */
170
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
171
    int sample_array_index;
172
    int last_i_start;
173
    RDFTContext *rdft;
174
    int rdft_bits;
175
    FFTSample *rdft_data;
176
    int xpos;
177

    
178
    SDL_Thread *subtitle_tid;
179
    int subtitle_stream;
180
    int subtitle_stream_changed;
181
    AVStream *subtitle_st;
182
    PacketQueue subtitleq;
183
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
184
    int subpq_size, subpq_rindex, subpq_windex;
185
    SDL_mutex *subpq_mutex;
186
    SDL_cond *subpq_cond;
187

    
188
    double frame_timer;
189
    double frame_last_pts;
190
    double frame_last_delay;
191
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
192
    int video_stream;
193
    AVStream *video_st;
194
    PacketQueue videoq;
195
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
196
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
197
    int64_t video_current_pos;                   ///<current displayed file pos
198
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
199
    int pictq_size, pictq_rindex, pictq_windex;
200
    SDL_mutex *pictq_mutex;
201
    SDL_cond *pictq_cond;
202
#if !CONFIG_AVFILTER
203
    struct SwsContext *img_convert_ctx;
204
#endif
205

    
206
    //    QETimer *video_timer;
207
    char filename[1024];
208
    int width, height, xleft, ytop;
209

    
210
    int64_t faulty_pts;
211
    int64_t faulty_dts;
212
    int64_t last_dts_for_fault_detection;
213
    int64_t last_pts_for_fault_detection;
214

    
215
#if CONFIG_AVFILTER
216
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
217
#endif
218

    
219
    float skip_frames;
220
    float skip_frames_index;
221
    int refresh;
222
} VideoState;
223

    
224
static void show_help(void);
225
static int audio_write_get_buf_size(VideoState *is);
226

    
227
/* options specified by the user */
228
static AVInputFormat *file_iformat;
229
static const char *input_filename;
230
static const char *window_title;
231
static int fs_screen_width;
232
static int fs_screen_height;
233
static int screen_width = 0;
234
static int screen_height = 0;
235
static int frame_width = 0;
236
static int frame_height = 0;
237
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
238
static int audio_disable;
239
static int video_disable;
240
static int wanted_stream[AVMEDIA_TYPE_NB]={
241
    [AVMEDIA_TYPE_AUDIO]=-1,
242
    [AVMEDIA_TYPE_VIDEO]=-1,
243
    [AVMEDIA_TYPE_SUBTITLE]=-1,
244
};
245
static int seek_by_bytes=-1;
246
static int display_disable;
247
static int show_status = 1;
248
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
249
static int64_t start_time = AV_NOPTS_VALUE;
250
static int64_t duration = AV_NOPTS_VALUE;
251
static int debug = 0;
252
static int debug_mv = 0;
253
static int step = 0;
254
static int thread_count = 1;
255
static int workaround_bugs = 1;
256
static int fast = 0;
257
static int genpts = 0;
258
static int lowres = 0;
259
static int idct = FF_IDCT_AUTO;
260
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
261
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
262
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
263
static int error_recognition = FF_ER_CAREFUL;
264
static int error_concealment = 3;
265
static int decoder_reorder_pts= -1;
266
static int autoexit;
267
static int exit_on_keydown;
268
static int exit_on_mousedown;
269
static int loop=1;
270
static int framedrop=1;
271

    
272
static int rdftspeed=20;
273
#if CONFIG_AVFILTER
274
static char *vfilters = NULL;
275
#endif
276

    
277
/* current context */
278
static int is_full_screen;
279
static VideoState *cur_stream;
280
static int64_t audio_callback_time;
281

    
282
static AVPacket flush_pkt;
283

    
284
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
285
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
286
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
287

    
288
static SDL_Surface *screen;
289

    
290
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
291

    
292
/* packet queue handling */
293
static void packet_queue_init(PacketQueue *q)
294
{
295
    memset(q, 0, sizeof(PacketQueue));
296
    q->mutex = SDL_CreateMutex();
297
    q->cond = SDL_CreateCond();
298
    packet_queue_put(q, &flush_pkt);
299
}
300

    
301
/* Discard every queued packet and reset the counters to empty. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    for (cur = q->first_pkt; cur; cur = next) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
317

    
318
/* Destroy a queue: drop any remaining packets, then release the SDL
 * synchronisation primitives. The struct itself is not freed. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
324

    
325
/* Append a packet to the queue and wake one waiting consumer.
 * The flush marker is stored as-is; all other packets are duplicated so
 * the queue owns their data. Returns 0 on success, -1 on failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *node;

    /* duplicate the packet (the flush marker carries no payload) */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    node = av_malloc(sizeof(AVPacketList));
    if (!node)
        return -1;
    node->pkt  = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = node;
    else
        q->first_pkt = node;
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size + sizeof(*node);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
356

    
357
/* Mark the queue as aborted and wake any reader blocked in
 * packet_queue_get() so it returns -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
367

    
368
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
369
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
370
{
371
    AVPacketList *pkt1;
372
    int ret;
373

    
374
    SDL_LockMutex(q->mutex);
375

    
376
    for(;;) {
377
        if (q->abort_request) {
378
            ret = -1;
379
            break;
380
        }
381

    
382
        pkt1 = q->first_pkt;
383
        if (pkt1) {
384
            q->first_pkt = pkt1->next;
385
            if (!q->first_pkt)
386
                q->last_pkt = NULL;
387
            q->nb_packets--;
388
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
389
            *pkt = pkt1->pkt;
390
            av_free(pkt1);
391
            ret = 1;
392
            break;
393
        } else if (!block) {
394
            ret = 0;
395
            break;
396
        } else {
397
            SDL_CondWait(q->cond, q->mutex);
398
        }
399
    }
400
    SDL_UnlockMutex(q->mutex);
401
    return ret;
402
}
403

    
404
/* Fill an axis-aligned rectangle of the surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;

    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}
414

    
415
#if 0
/* draw only the border of a rectangle (currently unused) */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    /* thicknesses of the four bands around the inner rectangle,
       clamped to zero when the rectangle reaches the window edge */
    int w1 = x < 0 ? 0 : x;
    int w2 = s->width  - (x + w) < 0 ? 0 : s->width  - (x + w);
    int h1 = y < 0 ? 0 : y;
    int h2 = s->height - (y + h) < 0 ? 0 : s->height - (y + h);

    /* left band */
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    /* right band */
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    /* top band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    /* bottom band */
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif
452

    
453
/* Blend newp over oldp with alpha a (0..255); s is the fixed-point shift
 * used when oldp/newp are pre-summed over 2^s samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack the 32-bit ARGB word at s into its four 8-bit components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it
 * as AYUV components (the palette is already converted to YCrCb). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack four 8-bit components into the 32-bit AYUV word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* subtitle bitmaps are paletted: one byte per pixel */
#define BPP 1
481

    
482
/* Alpha-blend one paletted subtitle rectangle onto a YUV420P picture.
 * dst   - destination picture (Y at full resolution, Cb/Cr subsampled 2x2)
 * rect  - paletted subtitle bitmap (pict.data[0] = indices,
 *         pict.data[1] = 32-bit AYUV palette)
 * imgw/imgh - destination picture dimensions used for clipping.
 * Luma is blended per pixel; chroma is blended from the alpha-weighted
 * sum of each 2x2 (or partial) pixel group, hence the u1/v1/a1 accumulators.
 * Bug fix: in the odd-height tail loop the chroma blend used u/v (the
 * last sample only) instead of the accumulated u1/v1, unlike every other
 * pixel-pair loop in this function. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the subtitle rectangle against the picture */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* odd first row: chroma shared with the row above, blend singly */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: process two rows at a time, averaging chroma over 2x2 */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* fixed: blend the summed pair chroma (u1/v1), not just the
               second pixel's u/v */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
681

    
682
/* Release the libavcodec-owned data of a queued subtitle. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
686

    
687
/* Show the picture at the read index of the picture queue: blend any due
 * subtitle into its overlay, compute the letterboxed display rectangle
 * from the sample aspect ratio, and hand it to SDL. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (!vp->bmp)
        return;

    /* pixel aspect ratio: filter output, else stream, else codec */
#if CONFIG_AVFILTER
    if (vp->picref->video->pixel_aspect.num == 0)
        aspect_ratio = 0;
    else
        aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else
    /* XXX: use variable in the frame */
    if (is->video_st->sample_aspect_ratio.num)
        aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
    else if (is->video_st->codec->sample_aspect_ratio.num)
        aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
    else
        aspect_ratio = 0;
#endif
    if (aspect_ratio <= 0.0)
        aspect_ratio = 1.0;
    aspect_ratio *= (float)vp->width / (float)vp->height;

    /* blend the pending subtitle once its display time has been reached */
    if (is->subtitle_st && is->subpq_size > 0) {
        sp = &is->subpq[is->subpq_rindex];

        if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
            SDL_LockYUVOverlay (vp->bmp);

            /* SDL YV12 overlays store planes as Y, V, U:
               swap the chroma pointers/pitches for AVPicture order */
            pict.data[0] = vp->bmp->pixels[0];
            pict.data[1] = vp->bmp->pixels[2];
            pict.data[2] = vp->bmp->pixels[1];

            pict.linesize[0] = vp->bmp->pitches[0];
            pict.linesize[1] = vp->bmp->pitches[2];
            pict.linesize[2] = vp->bmp->pitches[1];

            for (i = 0; i < sp->sub.num_rects; i++)
                blend_subrect(&pict, sp->sub.rects[i],
                              vp->bmp->w, vp->bmp->h);

            SDL_UnlockYUVOverlay (vp->bmp);
        }
    }

    /* XXX: we suppose the screen has a 1.0 pixel ratio */
    /* fit the aspect-corrected picture inside the window, even sizes */
    height = is->height;
    width = ((int)rint(height * aspect_ratio)) & ~1;
    if (width > is->width) {
        width = is->width;
        height = ((int)rint(width / aspect_ratio)) & ~1;
    }
    x = (is->width - width) / 2;
    y = (is->height - height) / 2;
    if (!is->no_background) {
        /* fill the background */
        //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
    } else {
        is->no_background = 0;
    }
    rect.x = is->xleft + x;
    rect.y = is->ytop  + y;
    rect.w = width;
    rect.h = height;
    SDL_DisplayYUVOverlay(vp->bmp, &rect);
}
808

    
809
/* Mathematical modulo: result is always in [0, b) for b > 0, unlike the
 * C % operator which can return negative values. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
817

    
818
/* Render the audio visualisation into the SDL surface: an oscilloscope
 * waveform when s->show_audio == 1, otherwise a scrolling RDFT spectrum.
 * Called from the display path; reads the ring buffer of recently played
 * samples (s->sample_array) filled by the audio callback.
 * Bug fix: time_diff was declared int16_t; av_gettime() deltas are
 * microseconds in an int64_t and overflow a 16-bit variable immediately,
 * corrupting the computed display delay. It must be int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, used as the FFT size */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing nearby to stabilise the waveform */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between the per-channel graphs */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window height changed: rebuild the FFT context and buffer */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* parabolic window applied to the sample block */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* advance (and wrap) the scrolling spectrum column */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
956

    
957
/* Create or reuse the SDL output surface for the current display mode.
 * Size preference: explicit user request, then filter/stream dimensions,
 * then a 640x480 fallback. Returns 0 on success, -1 on SDL failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    flags |= is_full_screen ? SDL_FULLSCREEN : SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if (!is_full_screen && screen_width) {
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    } else if (is->out_video_filter && is->out_video_filter->inputs[0]) {
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    } else if (is->video_st && is->video_st->codec->width) {
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }

    /* nothing to do when the existing surface already matches */
    if (screen && is->width == screen->w && screen->w == w &&
        is->height == screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width  = screen->w;
    is->height = screen->h;

    return 0;
}
1006

    
1007
/* display the current picture, if any */
1008
static void video_display(VideoState *is)
1009
{
1010
    if(!screen)
1011
        video_open(cur_stream);
1012
    if (is->audio_st && is->show_audio)
1013
        video_audio_display(is);
1014
    else if (is->video_st)
1015
        video_image_display(is);
1016
}
1017

    
1018
static int refresh_thread(void *opaque)
1019
{
1020
    VideoState *is= opaque;
1021
    while(!is->abort_request){
1022
    SDL_Event event;
1023
    event.type = FF_REFRESH_EVENT;
1024
    event.user.data1 = opaque;
1025
        if(!is->refresh){
1026
            is->refresh=1;
1027
    SDL_PushEvent(&event);
1028
        }
1029
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1030
    }
1031
    return 0;
1032
}
1033

    
1034
/* get the current audio clock value */
1035
static double get_audio_clock(VideoState *is)
1036
{
1037
    double pts;
1038
    int hw_buf_size, bytes_per_sec;
1039
    pts = is->audio_clock;
1040
    hw_buf_size = audio_write_get_buf_size(is);
1041
    bytes_per_sec = 0;
1042
    if (is->audio_st) {
1043
        bytes_per_sec = is->audio_st->codec->sample_rate *
1044
            2 * is->audio_st->codec->channels;
1045
    }
1046
    if (bytes_per_sec)
1047
        pts -= (double)hw_buf_size / bytes_per_sec;
1048
    return pts;
1049
}
1050

    
1051
/* get the current video clock value */
1052
static double get_video_clock(VideoState *is)
1053
{
1054
    if (is->paused) {
1055
        return is->video_current_pts;
1056
    } else {
1057
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058
    }
1059
}
1060

    
1061
/* get the current external clock value */
1062
static double get_external_clock(VideoState *is)
1063
{
1064
    int64_t ti;
1065
    ti = av_gettime();
1066
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067
}
1068

    
1069
/* get the current master clock value */
1070
static double get_master_clock(VideoState *is)
1071
{
1072
    double val;
1073

    
1074
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075
        if (is->video_st)
1076
            val = get_video_clock(is);
1077
        else
1078
            val = get_audio_clock(is);
1079
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080
        if (is->audio_st)
1081
            val = get_audio_clock(is);
1082
        else
1083
            val = get_video_clock(is);
1084
    } else {
1085
        val = get_external_clock(is);
1086
    }
1087
    return val;
1088
}
1089

    
1090
/* seek in the stream */
1091
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092
{
1093
    if (!is->seek_req) {
1094
        is->seek_pos = pos;
1095
        is->seek_rel = rel;
1096
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097
        if (seek_by_bytes)
1098
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1099
        is->seek_req = 1;
1100
    }
1101
}
1102

    
1103
/* pause or resume the video.
 * When resuming, the frame timer is advanced by the time spent paused
 * and the current-pts drift is recomputed so get_video_clock() keeps
 * returning a continuous value.
 * NOTE(review): av_gettime() is intentionally sampled three separate
 * times; the statements are order-sensitive. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* account for the pause duration in the frame timer */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* demuxer honored av_read_pause(): resync the pts to now */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so the video clock resumes smoothly */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1115

    
1116
/* Compute the wall-clock time at which the frame with pts
 * 'frame_current_pts' should be displayed, advancing is->frame_timer
 * by the (possibly sync-corrected) inter-frame delay.
 * Returns the updated frame timer.
 *
 * Fix: the DEBUG_SYNC printf referenced 'actual_delay', which is not
 * defined in this function (compile error whenever DEBUG_SYNC is
 * enabled), and printed 'diff' which was uninitialized on the
 * video-master path. The debug line now prints 'delay' and 'diff' is
 * zero-initialized. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1156

    
1157
/* called to display each frame.
 * Runs in the main thread on FF_REFRESH_EVENT: pops due pictures off
 * the picture queue, drops late frames when -framedrop is on, reaps
 * expired subtitles, draws the frame, and periodically prints the
 * status line. Returns early (without consuming) when the head picture
 * is not yet due. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* head picture not yet due: keep it queued and return */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                /* no successor queued: estimate from the video clock */
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next frame's
               deadline, raise the skip ratio and discard this picture */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired or
                           the next one has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* refresh the status line at most every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1295

    
1296
/* Stop the demux and refresh threads, release every picture-queue
 * slot and synchronization primitive, then free the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *pic;
    int i;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        pic = &is->pictq[i];
#if CONFIG_AVFILTER
        if (pic->picref) {
            avfilter_unref_buffer(pic->picref);
            pic->picref = NULL;
        }
#endif
        if (pic->bmp) {
            SDL_FreeYUVOverlay(pic->bmp);
            pic->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1329

    
1330
static void do_exit(void)
1331
{
1332
    int i;
1333
    if (cur_stream) {
1334
        stream_close(cur_stream);
1335
        cur_stream = NULL;
1336
    }
1337
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
1338
        av_free(avcodec_opts[i]);
1339
    av_free(avformat_opts);
1340
    av_free(sws_opts);
1341
#if CONFIG_AVFILTER
1342
    avfilter_uninit();
1343
#endif
1344
    if (show_status)
1345
        printf("\n");
1346
    SDL_Quit();
1347
    exit(0);
1348
}
1349

    
1350
/* allocate a picture (needs to do that in main thread to avoid
1351
   potential locking problems */
1352
static void alloc_picture(void *opaque)
1353
{
1354
    VideoState *is = opaque;
1355
    VideoPicture *vp;
1356

    
1357
    vp = &is->pictq[is->pictq_windex];
1358

    
1359
    if (vp->bmp)
1360
        SDL_FreeYUVOverlay(vp->bmp);
1361

    
1362
#if CONFIG_AVFILTER
1363
    if (vp->picref)
1364
        avfilter_unref_buffer(vp->picref);
1365
    vp->picref = NULL;
1366

    
1367
    vp->width   = is->out_video_filter->inputs[0]->w;
1368
    vp->height  = is->out_video_filter->inputs[0]->h;
1369
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1370
#else
1371
    vp->width   = is->video_st->codec->width;
1372
    vp->height  = is->video_st->codec->height;
1373
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1374
#endif
1375

    
1376
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1377
                                   SDL_YV12_OVERLAY,
1378
                                   screen);
1379
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1380
        /* SDL allocates a buffer smaller than requested if the video
1381
         * overlay hardware is unable to support the requested size. */
1382
        fprintf(stderr, "Error: the video system does not support an image\n"
1383
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1384
                        "to reduce the image size.\n", vp->width, vp->height );
1385
        do_exit();
1386
    }
1387

    
1388
    SDL_LockMutex(is->pictq_mutex);
1389
    vp->allocated = 1;
1390
    SDL_CondSignal(is->pictq_cond);
1391
    SDL_UnlockMutex(is->pictq_mutex);
1392
}
1393

    
1394
/**
 * Queue a decoded frame into the picture ring buffer, transferring the
 * pixels into the SDL YV12 overlay of the target slot. Blocks while the
 * queue is full; requests an (re)allocation from the main thread when
 * the overlay is missing or the wrong size.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @param pos byte position of the frame's packet in the input stream
 * @return 0 on success, -1 if the video packet queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full and display idle: decay the skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence the 1<->2 plane swap */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* convert whatever the decoder produced into YUV420P */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1515

    
1516
/**
1517
 * compute the exact PTS for the picture if it is omitted in the stream
1518
 * @param pts1 the dts of the pkt / pts of the frame
1519
 */
1520
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1521
{
1522
    double frame_delay, pts;
1523

    
1524
    pts = pts1;
1525

    
1526
    if (pts != 0) {
1527
        /* update video clock with pts, if present */
1528
        is->video_clock = pts;
1529
    } else {
1530
        pts = is->video_clock;
1531
    }
1532
    /* update video clock for next frame */
1533
    frame_delay = av_q2d(is->video_st->codec->time_base);
1534
    /* for MPEG2, the frame can be repeated, so we update the
1535
       clock accordingly */
1536
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1537
    is->video_clock += frame_delay;
1538

    
1539
#if defined(DEBUG_SYNC) && 0
1540
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1541
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1542
#endif
1543
    return queue_picture(is, src_frame, pts, pos);
1544
}
1545

    
1546
/* Pull the next packet off the video queue and decode it.
 * Returns 1 when a displayable frame was produced (with *pts chosen
 * between the reordered frame pts and the packet dts, guided by the
 * faulty_pts/faulty_dts counters), 0 when the frame was consumed
 * without output (flush, no picture, or skipped for frame dropping),
 * and -1 when the queue was aborted. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* flush marker after a seek: reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait for the display thread to drain the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotonic timestamps to decide later which of
           dts/pts is more trustworthy */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it misbehaves less
           than the dts, or when no dts exists; fall back to dts, then 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame-skipping: only report 1 every skip_frames frames */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1617

    
1618
#if CONFIG_AVFILTER
1619
/* Private state of the ffplay_input source filter, which feeds decoded
 * frames from the player into the libavfilter graph. */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame reused across decodes */
    int use_dr1;      /* non-zero when DR1 buffer callbacks are installed */
} FilterPriv;
1624

    
1625
/* AVCodecContext.get_buffer callback (DR1 path): the decoder renders
 * directly into a buffer obtained from the filter graph. The buffer is
 * over-allocated by the codec edge width on every side and the data
 * pointers are offset past the edge. Returns 0 on success, -1 when no
 * filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the codec's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    /* reserve edge pixels on both sides of each dimension */
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes are shifted by the format's subsampling factors */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the left and top edge */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget and the filter can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1668

    
1669
/* AVCodecContext.release_buffer callback: clear the frame's data
 * pointers and drop our reference to the backing filter buffer. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1674

    
1675
/* AVCodecContext.reget_buffer callback: reuse the frame's existing
 * filter buffer, allocating a fresh readable one only when the frame
 * has no data yet. Fails if the picture geometry or pixel format no
 * longer matches the buffer. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *buf = pic->opaque;

    if (!pic->data[0]) {
        /* nothing attached yet: request a readable buffer instead */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if (codec->width  != buf->video->w ||
        codec->height != buf->video->h ||
        codec->pix_fmt != buf->format) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1693

    
1694
/* init callback of the ffplay_input source filter; 'opaque' must be the
 * player's VideoState. Installs the DR1 buffer callbacks when the
 * decoder supports direct rendering. Returns 0 on success, -1 when no
 * VideoState was supplied. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *avctx;

    if (!opaque)
        return -1;

    priv->is = opaque;
    avctx = priv->is->video_st->codec;
    avctx->opaque = ctx;
    if (avctx->codec->capabilities & CODEC_CAP_DR1) {
        /* let the decoder render straight into filter buffers */
        priv->use_dr1 = 1;
        avctx->get_buffer     = input_get_buffer;
        avctx->release_buffer = input_release_buffer;
        avctx->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
1714

    
1715
/* uninit callback: release the scratch decode frame. */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;

    av_free(priv->frame);
}
1720

    
1721
/* request_frame callback of the source filter: decode packets until a
 * displayable frame appears, wrap it in a buffer ref (zero-copy on the
 * DR1 path, copied otherwise) and push it down the link.
 * Returns 0 on success, -1 when decoding was aborted. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding until get_video_frame() reports a frame (1) or an
       abort (-1); frameless packets are freed and retried */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer: just take a new ref */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                             priv->frame->data, priv->frame->linesize,
                             picref->format, link->w, link->h);
    }
    /* NOTE(review): pkt.pos is read below after av_free_packet(); that
       call only releases the payload, the pos field remains valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1753

    
1754
/* Restrict the source filter's output format to whatever pixel format
 * the video decoder produces. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1764

    
1765
/* Propagate the decoder's frame dimensions onto the output link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *dec = priv->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;

    return 0;
}
1775

    
1776
/* Source filter: feeds decoded frames from ffplay's video decoder into
 * the libavfilter graph. Pure source — no inputs, a single video
 * output pulled via request_frame. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1794

    
1795
/* Sink pad end_frame callback: intentionally a no-op — the frame is
 * pulled out of the graph by get_filtered_video_frame() instead of
 * being forwarded here. */
static void output_end_frame(AVFilterLink *link)
{
}
1798

    
1799
/* The sink accepts only YUV420P, which matches the SDL YV12 overlay
 * used for display. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1806

    
1807
/* Pull one filtered frame out of the graph through the sink's input
 * link. On success returns 1 with frame/pts/pos filled in; 'frame'
 * borrows the buffer ref via frame->opaque (caller must unref it).
 * Returns -1 when no frame could be obtained. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterBufferRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_buf))
        return -1;
    /* take ownership of the buffer away from the link */
    ctx->inputs[0]->cur_buf = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1827

    
1828
/* Sink filter at the end of ffplay's graph.  It only accepts readable
 * YUV420P frames (see output_query_formats); frames are consumed by
 * polling get_filtered_video_frame() rather than via end_frame. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no outputs: pure sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1841
#endif  /* CONFIG_AVFILTER */
1842

    
1843
/* Video decoding thread.
 *
 * With CONFIG_AVFILTER it builds a filter graph (source -> optional user
 * chain from -vf -> sink), then loops pulling filtered frames; without
 * avfilter it decodes packets directly via get_video_frame().  Each frame
 * is handed to output_picture2() for queuing/display.  Returns 0 when the
 * stream ends, is aborted, or graph setup fails. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* pass the global swscale flags to any auto-inserted scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;

    /* source gets the VideoState, sink gets the AVFrame it fills */
    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;

    if(vfilters) {
        /* user supplied a -vf chain: splice it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user chain: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        /* busy-wait (10ms steps) while paused, unless we are shutting down */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no frame produced this iteration, try again */
        if (!ret)
            continue;

        /* convert stream-timebase pts to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after emitting one frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1936

    
1937
/* Subtitle decoding thread.
 *
 * Pops packets from the subtitle queue, decodes them, converts each
 * bitmap subtitle's palette from RGBA to YUVA in place, and publishes the
 * result into the subpicture ring buffer (subpq), blocking while it is
 * full.  Exits when the queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* sentinel packet inserted after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap (paletted) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette entries from RGBA to YUVA,
               which is what the blend/display code expects */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2011

    
2012
/* copy samples for viewing in editor window */
2013
static void update_sample_display(VideoState *is, short *samples, int samples_size)
2014
{
2015
    int size, len, channels;
2016

    
2017
    channels = is->audio_st->codec->channels;
2018

    
2019
    size = samples_size / sizeof(short);
2020
    while (size > 0) {
2021
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2022
        if (len > size)
2023
            len = size;
2024
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2025
        samples += len;
2026
        is->sample_array_index += len;
2027
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2028
            is->sample_array_index = 0;
2029
        size -= len;
2030
    }
2031
}
2032

    
2033
/* return the new audio buffer size (samples can be added or deleted
2034
   to get better sync if video or external master clock) */
2035
static int synchronize_audio(VideoState *is, short *samples,
2036
                             int samples_size1, double pts)
2037
{
2038
    int n, samples_size;
2039
    double ref_clock;
2040

    
2041
    n = 2 * is->audio_st->codec->channels;
2042
    samples_size = samples_size1;
2043

    
2044
    /* if not master, then we try to remove or add samples to correct the clock */
2045
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2046
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2047
        double diff, avg_diff;
2048
        int wanted_size, min_size, max_size, nb_samples;
2049

    
2050
        ref_clock = get_master_clock(is);
2051
        diff = get_audio_clock(is) - ref_clock;
2052

    
2053
        if (diff < AV_NOSYNC_THRESHOLD) {
2054
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2055
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2056
                /* not enough measures to have a correct estimate */
2057
                is->audio_diff_avg_count++;
2058
            } else {
2059
                /* estimate the A-V difference */
2060
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2061

    
2062
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2063
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2064
                    nb_samples = samples_size / n;
2065

    
2066
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2067
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2068
                    if (wanted_size < min_size)
2069
                        wanted_size = min_size;
2070
                    else if (wanted_size > max_size)
2071
                        wanted_size = max_size;
2072

    
2073
                    /* add or remove samples to correction the synchro */
2074
                    if (wanted_size < samples_size) {
2075
                        /* remove samples */
2076
                        samples_size = wanted_size;
2077
                    } else if (wanted_size > samples_size) {
2078
                        uint8_t *samples_end, *q;
2079
                        int nb;
2080

    
2081
                        /* add samples */
2082
                        nb = (samples_size - wanted_size);
2083
                        samples_end = (uint8_t *)samples + samples_size - n;
2084
                        q = samples_end + n;
2085
                        while (nb > 0) {
2086
                            memcpy(q, samples_end, n);
2087
                            q += n;
2088
                            nb -= n;
2089
                        }
2090
                        samples_size = wanted_size;
2091
                    }
2092
                }
2093
#if 0
2094
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2095
                       diff, avg_diff, samples_size - samples_size1,
2096
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2097
#endif
2098
            }
2099
        } else {
2100
            /* too big difference : may be initial PTS errors, so
2101
               reset A-V filter */
2102
            is->audio_diff_avg_count = 0;
2103
            is->audio_diff_cum = 0;
2104
        }
2105
    }
2106

    
2107
    return samples_size;
2108
}
2109

    
2110
/* Decode one audio frame and return its uncompressed size in bytes.
 *
 * Drains the current packet (which may contain several frames), converting
 * the decoder's sample format to S16 when needed, leaves the result in
 * is->audio_buf, advances is->audio_clock by the decoded duration, and
 * stores the frame's presentation time in *pts_ptr.  When the packet is
 * exhausted it blocks on the audio queue for the next one.
 * Returns -1 when paused or the queue is aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed portion of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's format changes */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (planar arrays of 1) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            /* advance the audio clock by the duration of this frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* sentinel packet after a seek: reset the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2212

    
2213
/* get the current audio output buffer size, in samples. With SDL, we
2214
   cannot have a precise information */
2215
static int audio_write_get_buf_size(VideoState *is)
2216
{
2217
    return is->audio_buf_size - is->audio_buf_index;
2218
}
2219

    
2220

    
2221
/* SDL audio callback: fill 'stream' with 'len' bytes of audio.
 *
 * Runs on SDL's audio thread.  Decodes frames on demand via
 * audio_decode_frame(), applies clock-based sample correction through
 * synchronize_audio(), and substitutes silence on decode errors so the
 * device is never starved. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* remember when the callback fired, used by the audio clock estimate */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* current buffer fully consumed: decode the next frame */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits of the remaining decoded data */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2256

    
2257
/* Open a given stream (audio, video or subtitle). Return 0 if OK.
 *
 * Configures the codec context from the global command-line options,
 * opens the decoder, opens the SDL audio device for audio streams, and
 * spawns the matching decoding thread.  On success the corresponding
 * is->*_stream / is->*_st fields are set. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: ask the decoder to downmix to at most 2
       channels, matching the SDL output below */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global command-line decoding options */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    /* stop discarding packets for this stream (see decode_thread) */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2356

    
2357
/* Close one stream component: abort its packet queue, wake and join the
 * matching decoder thread, release per-stream resources, then close the
 * codec and clear the is->*_st / is->*_stream fields. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL audio thread so sdl_audio_callback cannot run */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* resume discarding packets for this stream in the demuxer loop */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2428

    
2429
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* Interrupt callback installed via url_set_interrupt_cb(): returns
 * non-zero to make blocking I/O inside libavformat bail out once the
 * player requested shutdown. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2437

    
2438
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, selects and opens the best
 * audio/video/subtitle streams, then loops reading packets and
 * dispatching them to the per-stream queues, handling pause, seek
 * requests, queue back-pressure, EOF/looping and the play range.
 * On exit it closes everything and, on error, posts FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    int st_count[AVMEDIA_TYPE_NB]={0};
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O in libavformat to be interrupted on abort */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for formats (e.g. raw video) that need them */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the wanted stream (or the one with the most
       frames seen during probing); everything starts out discarded */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: show the audio visualisation instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop all queued packets and tell each decoder to flush */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* send a null packet so the video decoder drains its delay */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* everything consumed: loop again or auto-exit */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we died with an error */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2698

    
2699
/* Allocate a VideoState for 'filename' and spawn the demuxer thread.
 * Returns NULL on allocation or thread-creation failure (caller owns the
 * returned state; it is torn down via the decode thread's fail path). */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* release the synchronization primitives created above; the
         * original code leaked them on this failure path */
        SDL_DestroyMutex(is->pictq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->subpq_cond);
        av_free(is);
        return NULL;
    }
    return is;
}
/* Switch to the next stream of the given media type (audio/video/subtitle).
 * Scans forward (with wrap-around) from the currently open stream; for
 * subtitles, wrapping past the end selects "no subtitle" (index -1).
 * The old stream component is closed and the new one opened. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* subtitles may legitimately start from "none" (-1); audio/video may not */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return; /* wrapped all the way around: no alternative stream */
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
static void toggle_full_screen(void)
2778
{
2779
    is_full_screen = !is_full_screen;
2780
    if (!fs_screen_width) {
2781
        /* use default SDL method */
2782
//        SDL_WM_ToggleFullScreen(screen);
2783
    }
2784
    video_open(cur_stream);
2785
}
2786

    
2787
static void toggle_pause(void)
2788
{
2789
    if (cur_stream)
2790
        stream_pause(cur_stream);
2791
    step = 0;
2792
}
2793

    
2794
static void step_to_next_frame(void)
2795
{
2796
    if (cur_stream) {
2797
        /* if the stream is paused unpause it, then step */
2798
        if (cur_stream->paused)
2799
            stream_pause(cur_stream);
2800
    }
2801
    step = 1;
2802
}
2803

    
2804
static void toggle_audio_display(void)
2805
{
2806
    if (cur_stream) {
2807
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2808
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2809
        fill_rectangle(screen,
2810
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2811
                    bgcolor);
2812
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2813
    }
2814
}
2815

    
2816
/* handle an event sent by the GUI */
2817
static void event_loop(void)
2818
{
2819
    SDL_Event event;
2820
    double incr, pos, frac;
2821

    
2822
    for(;;) {
2823
        double x;
2824
        SDL_WaitEvent(&event);
2825
        switch(event.type) {
2826
        case SDL_KEYDOWN:
2827
            if (exit_on_keydown) {
2828
                do_exit();
2829
                break;
2830
            }
2831
            switch(event.key.keysym.sym) {
2832
            case SDLK_ESCAPE:
2833
            case SDLK_q:
2834
                do_exit();
2835
                break;
2836
            case SDLK_f:
2837
                toggle_full_screen();
2838
                break;
2839
            case SDLK_p:
2840
            case SDLK_SPACE:
2841
                toggle_pause();
2842
                break;
2843
            case SDLK_s: //S: Step to next frame
2844
                step_to_next_frame();
2845
                break;
2846
            case SDLK_a:
2847
                if (cur_stream)
2848
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2849
                break;
2850
            case SDLK_v:
2851
                if (cur_stream)
2852
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2853
                break;
2854
            case SDLK_t:
2855
                if (cur_stream)
2856
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2857
                break;
2858
            case SDLK_w:
2859
                toggle_audio_display();
2860
                break;
2861
            case SDLK_LEFT:
2862
                incr = -10.0;
2863
                goto do_seek;
2864
            case SDLK_RIGHT:
2865
                incr = 10.0;
2866
                goto do_seek;
2867
            case SDLK_UP:
2868
                incr = 60.0;
2869
                goto do_seek;
2870
            case SDLK_DOWN:
2871
                incr = -60.0;
2872
            do_seek:
2873
                if (cur_stream) {
2874
                    if (seek_by_bytes) {
2875
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2876
                            pos= cur_stream->video_current_pos;
2877
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2878
                            pos= cur_stream->audio_pkt.pos;
2879
                        }else
2880
                            pos = url_ftell(cur_stream->ic->pb);
2881
                        if (cur_stream->ic->bit_rate)
2882
                            incr *= cur_stream->ic->bit_rate / 8.0;
2883
                        else
2884
                            incr *= 180000.0;
2885
                        pos += incr;
2886
                        stream_seek(cur_stream, pos, incr, 1);
2887
                    } else {
2888
                        pos = get_master_clock(cur_stream);
2889
                        pos += incr;
2890
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2891
                    }
2892
                }
2893
                break;
2894
            default:
2895
                break;
2896
            }
2897
            break;
2898
        case SDL_MOUSEBUTTONDOWN:
2899
            if (exit_on_mousedown) {
2900
                do_exit();
2901
                break;
2902
            }
2903
        case SDL_MOUSEMOTION:
2904
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2905
                x= event.button.x;
2906
            }else{
2907
                if(event.motion.state != SDL_PRESSED)
2908
                    break;
2909
                x= event.motion.x;
2910
            }
2911
            if (cur_stream) {
2912
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2913
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2914
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2915
                }else{
2916
                    int64_t ts;
2917
                    int ns, hh, mm, ss;
2918
                    int tns, thh, tmm, tss;
2919
                    tns = cur_stream->ic->duration/1000000LL;
2920
                    thh = tns/3600;
2921
                    tmm = (tns%3600)/60;
2922
                    tss = (tns%60);
2923
                    frac = x/cur_stream->width;
2924
                    ns = frac*tns;
2925
                    hh = ns/3600;
2926
                    mm = (ns%3600)/60;
2927
                    ss = (ns%60);
2928
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2929
                            hh, mm, ss, thh, tmm, tss);
2930
                    ts = frac*cur_stream->ic->duration;
2931
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2932
                        ts += cur_stream->ic->start_time;
2933
                    stream_seek(cur_stream, ts, 0, 0);
2934
                }
2935
            }
2936
            break;
2937
        case SDL_VIDEORESIZE:
2938
            if (cur_stream) {
2939
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2940
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2941
                screen_width = cur_stream->width = event.resize.w;
2942
                screen_height= cur_stream->height= event.resize.h;
2943
            }
2944
            break;
2945
        case SDL_QUIT:
2946
        case FF_QUIT_EVENT:
2947
            do_exit();
2948
            break;
2949
        case FF_ALLOC_EVENT:
2950
            video_open(event.user.data1);
2951
            alloc_picture(event.user.data1);
2952
            break;
2953
        case FF_REFRESH_EVENT:
2954
            video_refresh_timer(event.user.data1);
2955
            cur_stream->refresh=0;
2956
            break;
2957
        default:
2958
            break;
2959
        }
2960
    }
2961
}
2962

    
2963
static void opt_frame_size(const char *arg)
2964
{
2965
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2966
        fprintf(stderr, "Incorrect frame size\n");
2967
        exit(1);
2968
    }
2969
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2970
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2971
        exit(1);
2972
    }
2973
}
2974

    
2975
static int opt_width(const char *opt, const char *arg)
2976
{
2977
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2978
    return 0;
2979
}
2980

    
2981
static int opt_height(const char *opt, const char *arg)
2982
{
2983
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2984
    return 0;
2985
}
2986

    
2987
static void opt_format(const char *arg)
2988
{
2989
    file_iformat = av_find_input_format(arg);
2990
    if (!file_iformat) {
2991
        fprintf(stderr, "Unknown input format: %s\n", arg);
2992
        exit(1);
2993
    }
2994
}
2995

    
2996
static void opt_frame_pix_fmt(const char *arg)
2997
{
2998
    frame_pix_fmt = av_get_pix_fmt(arg);
2999
}
3000

    
3001
static int opt_sync(const char *opt, const char *arg)
3002
{
3003
    if (!strcmp(arg, "audio"))
3004
        av_sync_type = AV_SYNC_AUDIO_MASTER;
3005
    else if (!strcmp(arg, "video"))
3006
        av_sync_type = AV_SYNC_VIDEO_MASTER;
3007
    else if (!strcmp(arg, "ext"))
3008
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3009
    else {
3010
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3011
        exit(1);
3012
    }
3013
    return 0;
3014
}
3015

    
3016
static int opt_seek(const char *opt, const char *arg)
3017
{
3018
    start_time = parse_time_or_die(opt, arg, 1);
3019
    return 0;
3020
}
3021

    
3022
static int opt_duration(const char *opt, const char *arg)
3023
{
3024
    duration = parse_time_or_die(opt, arg, 1);
3025
    return 0;
3026
}
3027

    
3028
static int opt_debug(const char *opt, const char *arg)
3029
{
3030
    av_log_set_level(99);
3031
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3032
    return 0;
3033
}
3034

    
3035
static int opt_vismv(const char *opt, const char *arg)
3036
{
3037
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3038
    return 0;
3039
}
3040

    
3041
static int opt_thread_count(const char *opt, const char *arg)
3042
{
3043
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3044
#if !HAVE_THREADS
3045
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3046
#endif
3047
    return 0;
3048
}
3049

    
3050
/* Command-line option table consumed by cmdutils' parse_options(). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
/* Print the one-line usage banner. */
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}
static void show_help(void)
3105
{
3106
    show_usage();
3107
    show_help_options(options, "Main options:\n",
3108
                      OPT_EXPERT, 0);
3109
    show_help_options(options, "\nAdvanced options:\n",
3110
                      OPT_EXPERT, OPT_EXPERT);
3111
    printf("\nWhile playing:\n"
3112
           "q, ESC              quit\n"
3113
           "f                   toggle full screen\n"
3114
           "p, SPC              pause\n"
3115
           "a                   cycle audio channel\n"
3116
           "v                   cycle video channel\n"
3117
           "t                   cycle subtitle channel\n"
3118
           "w                   show audio waves\n"
3119
           "s                   activate frame-step mode\n"
3120
           "left/right          seek backward/forward 10 seconds\n"
3121
           "down/up             seek backward/forward 1 minute\n"
3122
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3123
           );
3124
}
3125

    
3126
static void opt_input_file(const char *filename)
3127
{
3128
    if (input_filename) {
3129
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3130
                filename, input_filename);
3131
        exit(1);
3132
    }
3133
    if (!strcmp(filename, "-"))
3134
        filename = "pipe:";
3135
    input_filename = filename;
3136
}
3137

    
3138
/* Called from the main */
3139
int main(int argc, char **argv)
3140
{
3141
    int flags, i;
3142

    
3143
    /* register all codecs, demux and protocols */
3144
    avcodec_register_all();
3145
#if CONFIG_AVDEVICE
3146
    avdevice_register_all();
3147
#endif
3148
#if CONFIG_AVFILTER
3149
    avfilter_register_all();
3150
#endif
3151
    av_register_all();
3152

    
3153
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
3154
        avcodec_opts[i]= avcodec_alloc_context2(i);
3155
    }
3156
    avformat_opts = avformat_alloc_context();
3157
#if !CONFIG_AVFILTER
3158
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3159
#endif
3160

    
3161
    show_banner();
3162

    
3163
    parse_options(argc, argv, options, opt_input_file);
3164

    
3165
    if (!input_filename) {
3166
        show_usage();
3167
        fprintf(stderr, "An input file must be specified\n");
3168
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3169
        exit(1);
3170
    }
3171

    
3172
    if (display_disable) {
3173
        video_disable = 1;
3174
    }
3175
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3176
#if !defined(__MINGW32__) && !defined(__APPLE__)
3177
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3178
#endif
3179
    if (SDL_Init (flags)) {
3180
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3181
        exit(1);
3182
    }
3183

    
3184
    if (!display_disable) {
3185
#if HAVE_SDL_VIDEO_SIZE
3186
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3187
        fs_screen_width = vi->current_w;
3188
        fs_screen_height = vi->current_h;
3189
#endif
3190
    }
3191

    
3192
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3193
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3194
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3195

    
3196
    av_init_packet(&flush_pkt);
3197
    flush_pkt.data= "FLUSH";
3198

    
3199
    cur_stream = stream_open(input_filename, file_iformat);
3200

    
3201
    event_loop();
3202

    
3203
    /* never returns */
3204

    
3205
    return 0;
3206
}