Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 387b4ac9

History | View | Annotate | Download (100 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavcore/imgutils.h"
32
#include "libavcore/parseutils.h"
33
#include "libavformat/avformat.h"
34
#include "libavdevice/avdevice.h"
35
#include "libswscale/swscale.h"
36
#include "libavcodec/audioconvert.h"
37
#include "libavcodec/opt.h"
38
#include "libavcodec/avfft.h"
39

    
40
#if CONFIG_AVFILTER
41
# include "libavfilter/avfilter.h"
42
# include "libavfilter/avfiltergraph.h"
43
# include "libavfilter/graphparser.h"
44
#endif
45

    
46
#include "cmdutils.h"
47

    
48
#include <SDL.h>
49
#include <SDL_thread.h>
50

    
51
#ifdef __MINGW32__
52
#undef main /* We don't want SDL to override our main() */
53
#endif
54

    
55
#include <unistd.h>
56
#include <assert.h>
57

    
58
const char program_name[] = "FFplay";
59
const int program_birth_year = 2003;
60

    
61
//#define DEBUG_SYNC
62

    
63
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65
#define MIN_FRAMES 5
66

    
67
/* SDL audio buffer size, in samples. Should be small to have precise
68
   A/V sync as SDL does not have hardware buffer fullness info. */
69
#define SDL_AUDIO_BUFFER_SIZE 1024
70

    
71
/* no AV sync correction is done if below the AV sync threshold */
72
#define AV_SYNC_THRESHOLD 0.01
73
/* no AV correction is done if too big error */
74
#define AV_NOSYNC_THRESHOLD 10.0
75

    
76
#define FRAME_SKIP_FACTOR 0.05
77

    
78
/* maximum audio speed change to get correct sync */
79
#define SAMPLE_CORRECTION_PERCENT_MAX 10
80

    
81
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82
#define AUDIO_DIFF_AVG_NB   20
83

    
84
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
85
#define SAMPLE_ARRAY_SIZE (2*65536)
86

    
87
static int sws_flags = SWS_BICUBIC;
88

    
89
typedef struct PacketQueue {
90
    AVPacketList *first_pkt, *last_pkt;
91
    int nb_packets;
92
    int size;
93
    int abort_request;
94
    SDL_mutex *mutex;
95
    SDL_cond *cond;
96
} PacketQueue;
97

    
98
#define VIDEO_PICTURE_QUEUE_SIZE 2
99
#define SUBPICTURE_QUEUE_SIZE 4
100

    
101
typedef struct VideoPicture {
102
    double pts;                                  ///<presentation time stamp for this picture
103
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
104
    int64_t pos;                                 ///<byte position in file
105
    SDL_Overlay *bmp;
106
    int width, height; /* source height & width */
107
    int allocated;
108
    enum PixelFormat pix_fmt;
109

    
110
#if CONFIG_AVFILTER
111
    AVFilterBufferRef *picref;
112
#endif
113
} VideoPicture;
114

    
115
typedef struct SubPicture {
116
    double pts; /* presentation time stamp for this picture */
117
    AVSubtitle sub;
118
} SubPicture;
119

    
120
enum {
121
    AV_SYNC_AUDIO_MASTER, /* default choice */
122
    AV_SYNC_VIDEO_MASTER,
123
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124
};
125

    
126
typedef struct VideoState {
127
    SDL_Thread *parse_tid;
128
    SDL_Thread *video_tid;
129
    SDL_Thread *refresh_tid;
130
    AVInputFormat *iformat;
131
    int no_background;
132
    int abort_request;
133
    int paused;
134
    int last_paused;
135
    int seek_req;
136
    int seek_flags;
137
    int64_t seek_pos;
138
    int64_t seek_rel;
139
    int read_pause_return;
140
    AVFormatContext *ic;
141
    int dtg_active_format;
142

    
143
    int audio_stream;
144

    
145
    int av_sync_type;
146
    double external_clock; /* external clock base */
147
    int64_t external_clock_time;
148

    
149
    double audio_clock;
150
    double audio_diff_cum; /* used for AV difference average computation */
151
    double audio_diff_avg_coef;
152
    double audio_diff_threshold;
153
    int audio_diff_avg_count;
154
    AVStream *audio_st;
155
    PacketQueue audioq;
156
    int audio_hw_buf_size;
157
    /* samples output by the codec. we reserve more space for avsync
158
       compensation */
159
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161
    uint8_t *audio_buf;
162
    unsigned int audio_buf_size; /* in bytes */
163
    int audio_buf_index; /* in bytes */
164
    AVPacket audio_pkt_temp;
165
    AVPacket audio_pkt;
166
    enum SampleFormat audio_src_fmt;
167
    AVAudioConvert *reformat_ctx;
168

    
169
    int show_audio; /* if true, display audio samples */
170
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
171
    int sample_array_index;
172
    int last_i_start;
173
    RDFTContext *rdft;
174
    int rdft_bits;
175
    FFTSample *rdft_data;
176
    int xpos;
177

    
178
    SDL_Thread *subtitle_tid;
179
    int subtitle_stream;
180
    int subtitle_stream_changed;
181
    AVStream *subtitle_st;
182
    PacketQueue subtitleq;
183
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
184
    int subpq_size, subpq_rindex, subpq_windex;
185
    SDL_mutex *subpq_mutex;
186
    SDL_cond *subpq_cond;
187

    
188
    double frame_timer;
189
    double frame_last_pts;
190
    double frame_last_delay;
191
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
192
    int video_stream;
193
    AVStream *video_st;
194
    PacketQueue videoq;
195
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
196
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
197
    int64_t video_current_pos;                   ///<current displayed file pos
198
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
199
    int pictq_size, pictq_rindex, pictq_windex;
200
    SDL_mutex *pictq_mutex;
201
    SDL_cond *pictq_cond;
202
#if !CONFIG_AVFILTER
203
    struct SwsContext *img_convert_ctx;
204
#endif
205

    
206
    //    QETimer *video_timer;
207
    char filename[1024];
208
    int width, height, xleft, ytop;
209

    
210
    PtsCorrectionContext pts_ctx;
211

    
212
#if CONFIG_AVFILTER
213
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214
#endif
215

    
216
    float skip_frames;
217
    float skip_frames_index;
218
    int refresh;
219
} VideoState;
220

    
221
static void show_help(void);
222
static int audio_write_get_buf_size(VideoState *is);
223

    
224
/* options specified by the user */
225
static AVInputFormat *file_iformat;
226
static const char *input_filename;
227
static const char *window_title;
228
static int fs_screen_width;
229
static int fs_screen_height;
230
static int screen_width = 0;
231
static int screen_height = 0;
232
static int frame_width = 0;
233
static int frame_height = 0;
234
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235
static int audio_disable;
236
static int video_disable;
237
static int wanted_stream[AVMEDIA_TYPE_NB]={
238
    [AVMEDIA_TYPE_AUDIO]=-1,
239
    [AVMEDIA_TYPE_VIDEO]=-1,
240
    [AVMEDIA_TYPE_SUBTITLE]=-1,
241
};
242
static int seek_by_bytes=-1;
243
static int display_disable;
244
static int show_status = 1;
245
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246
static int64_t start_time = AV_NOPTS_VALUE;
247
static int64_t duration = AV_NOPTS_VALUE;
248
static int debug = 0;
249
static int debug_mv = 0;
250
static int step = 0;
251
static int thread_count = 1;
252
static int workaround_bugs = 1;
253
static int fast = 0;
254
static int genpts = 0;
255
static int lowres = 0;
256
static int idct = FF_IDCT_AUTO;
257
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260
static int error_recognition = FF_ER_CAREFUL;
261
static int error_concealment = 3;
262
static int decoder_reorder_pts= -1;
263
static int autoexit;
264
static int exit_on_keydown;
265
static int exit_on_mousedown;
266
static int loop=1;
267
static int framedrop=1;
268

    
269
static int rdftspeed=20;
270
#if CONFIG_AVFILTER
271
static char *vfilters = NULL;
272
#endif
273

    
274
/* current context */
275
static int is_full_screen;
276
static VideoState *cur_stream;
277
static int64_t audio_callback_time;
278

    
279
static AVPacket flush_pkt;
280

    
281
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
282
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284

    
285
static SDL_Surface *screen;
286

    
287
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
/* Set up an empty packet queue: zero every field, create the SDL
 * synchronization primitives, and prime the queue with the global
 * flush packet so the consumer starts from a flushed state. */
static void packet_queue_init(PacketQueue *queue)
{
    memset(queue, 0, sizeof(*queue));
    queue->cond  = SDL_CreateCond();
    queue->mutex = SDL_CreateMutex();
    packet_queue_put(queue, &flush_pkt);
}
297

    
298
/* Discard every packet still stored in the queue and reset its
 * bookkeeping, leaving the queue empty but usable. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *entry, *next;

    SDL_LockMutex(q->mutex);
    entry = q->first_pkt;
    while (entry) {
        next = entry->next;
        av_free_packet(&entry->pkt);
        av_freep(&entry);
        entry = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
314

    
315
/* Tear a queue down completely: drop any remaining packets, then
 * destroy the SDL primitives created by packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyCond(q->cond);
    SDL_DestroyMutex(q->mutex);
}
321

    
322
/* Append *pkt to the queue and wake one waiting consumer.
 * The packet is duplicated first so the queue owns its data; the shared
 * flush packet is the one exception and is stored as-is.
 * Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* take ownership of the packet data (flush_pkt is shared, never dup it) */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
353

    
354
/* Ask queue consumers to stop: raise the abort flag and wake any thread
 * blocked in packet_queue_get(), which will then return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
/* Take the next packet from the queue into *pkt.
 * Returns < 0 if the queue was aborted, 0 when empty and 'block' is
 * false, and > 0 when a packet was dequeued.  With 'block' set, waits
 * on the queue condition until a packet arrives or abort is requested. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *entry;
    int ret = 0;

    SDL_LockMutex(q->mutex);

    while (1) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        entry = q->first_pkt;
        if (entry) {
            /* unlink the head node and update the accounting */
            q->first_pkt = entry->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= entry->pkt.size + sizeof(*entry);
            *pkt = entry->pkt;
            av_free(entry);
            ret = 1;
            break;
        }

        if (!block) {
            ret = 0;
            break;
        }

        SDL_CondWait(q->cond, q->mutex);
    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}
400

    
401
/* Fill the rectangle (x, y, w, h) on the given surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area;

    area.x = x;
    area.y = y;
    area.w = w;
    area.h = h;

    SDL_FillRect(screen, &area, color);
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
/* Blend component 'newp' over 'oldp' with alpha 'a' (0..255). 's' extra
 * fractional bits let pre-summed chroma samples be averaged in the same
 * division (s=1 for a 2-sample sum, s=2 for 4). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack the 32-bit ARGB pixel at 's' into r, g, b, a components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at 's' and unpack it
 * into y, u, v, a (the palette is in YCrCb — see blend_subrect). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a back into one 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel: the subtitle bitmap is palettized, 1 byte each */
#define BPP 1
478

    
479
/* Alpha-blend a palettized subtitle rectangle onto a YUV 4:2:0 picture.
 *
 * dst        destination picture: data[0]=luma, data[1]/data[2]=chroma
 *            planes subsampled 2x2
 * rect       subtitle rectangle; pict.data[0] holds 8-bit palette
 *            indices, pict.data[1] the 32-bit palette (already in YCrCb)
 * imgw/imgh  destination dimensions used to clip the rectangle
 *
 * Luma is blended per pixel; chroma per 2x2 block, so the code is
 * unrolled into an optional odd leading row, pairs of full rows, and an
 * optional odd trailing row — each with odd-first-column / column-pair /
 * odd-last-column handling.
 *
 * Fix vs. the previous revision: the odd-height trailing row blended
 * chroma with the single-sample u/v instead of the accumulated two-sample
 * u1/v1 (while still using the accumulated alpha a1), skewing subtitle
 * colors on odd-height rectangles. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle against the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* odd leading row: each chroma sample covers only one source row,
       so blend it at reduced (a >> 2) weight */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: pairs of rows, chroma averaged over full 2x2 blocks */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* use the accumulated two-sample chroma (u1/v1), matching the
               accumulated alpha a1 — previously used single-sample u/v */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
678

    
679
/* Release the decoded subtitle payload held by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683

    
684
/* Display the current picture (pictq read slot) on the SDL overlay:
 * compute the aspect-corrected, centered display rectangle, blend any
 * due subtitle onto the overlay pixels, and present it. Does nothing
 * but an optional (disabled) clear when no overlay exists yet. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* sample aspect ratio comes from the filtered frame itself */
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come)
           directly into the overlay's pixel planes */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): planes 1 and 2 are swapped between the
                       overlay and AVPicture — presumably a YV12 overlay
                       (V plane stored before U); confirm where the overlay
                       is created (not in this chunk) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the aspect-corrected picture inside the window, rounding
           both dimensions down to even values */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
805

    
806
/* Mathematical modulo: like a % b, but for positive b the result is
 * always in [0, b), even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r < 0) ? r + b : r;
}
814

    
815
/* Draw the audio visualization: either a per-channel waveform
 * (s->show_audio == 1) or a scrolling RDFT spectrum column.
 *
 * The display is centered on the samples currently being played: the
 * hardware/write buffer delay is estimated and, when not paused, refined
 * using the time elapsed since the last audio callback.
 *
 * Fix vs. the previous revision: time_diff was declared int16_t but holds
 * a microsecond difference from av_gettime(), which overflows after only
 * ~32 ms and corrupts the delay estimate; it must be int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;  /* microseconds — was int16_t, which overflowed */
    int rdft_bits, nb_freq;

    /* smallest transform covering two window heights */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing to stabilize the waveform display */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: clear, then draw one centered trace per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0; ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        /* separator lines between channels */
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1; ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrum mode: draw one column of RDFT magnitudes per call */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window size changed: rebuild the transform context/buffer */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0; ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* parabolic window over the sample block */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
953

    
954
static int video_open(VideoState *is){
    /* Open (or reuse) the SDL output surface sized to the current mode.
     * Updates the global 'screen' and is->width/height.
     * Returns 0 on success, -1 if SDL cannot set the video mode. */
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    /* pick the window size: explicit user setting wins, then the
       filter-graph output (or codec) dimensions, then a 640x480 default */
    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the existing surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003

    
1004
/* display the current picture, if any */
1005
static void video_display(VideoState *is)
1006
{
1007
    if(!screen)
1008
        video_open(cur_stream);
1009
    if (is->audio_st && is->show_audio)
1010
        video_audio_display(is);
1011
    else if (is->video_st)
1012
        video_image_display(is);
1013
}
1014

    
1015
static int refresh_thread(void *opaque)
1016
{
1017
    VideoState *is= opaque;
1018
    while(!is->abort_request){
1019
    SDL_Event event;
1020
    event.type = FF_REFRESH_EVENT;
1021
    event.user.data1 = opaque;
1022
        if(!is->refresh){
1023
            is->refresh=1;
1024
    SDL_PushEvent(&event);
1025
        }
1026
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027
    }
1028
    return 0;
1029
}
1030

    
1031
/* get the current audio clock value */
1032
static double get_audio_clock(VideoState *is)
1033
{
1034
    double pts;
1035
    int hw_buf_size, bytes_per_sec;
1036
    pts = is->audio_clock;
1037
    hw_buf_size = audio_write_get_buf_size(is);
1038
    bytes_per_sec = 0;
1039
    if (is->audio_st) {
1040
        bytes_per_sec = is->audio_st->codec->sample_rate *
1041
            2 * is->audio_st->codec->channels;
1042
    }
1043
    if (bytes_per_sec)
1044
        pts -= (double)hw_buf_size / bytes_per_sec;
1045
    return pts;
1046
}
1047

    
1048
/* get the current video clock value */
1049
static double get_video_clock(VideoState *is)
1050
{
1051
    if (is->paused) {
1052
        return is->video_current_pts;
1053
    } else {
1054
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055
    }
1056
}
1057

    
1058
/* get the current external clock value */
1059
static double get_external_clock(VideoState *is)
1060
{
1061
    int64_t ti;
1062
    ti = av_gettime();
1063
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064
}
1065

    
1066
/* get the current master clock value */
1067
static double get_master_clock(VideoState *is)
1068
{
1069
    double val;
1070

    
1071
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072
        if (is->video_st)
1073
            val = get_video_clock(is);
1074
        else
1075
            val = get_audio_clock(is);
1076
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077
        if (is->audio_st)
1078
            val = get_audio_clock(is);
1079
        else
1080
            val = get_video_clock(is);
1081
    } else {
1082
        val = get_external_clock(is);
1083
    }
1084
    return val;
1085
}
1086

    
1087
/* seek in the stream */
1088
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089
{
1090
    if (!is->seek_req) {
1091
        is->seek_pos = pos;
1092
        is->seek_rel = rel;
1093
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094
        if (seek_by_bytes)
1095
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1096
        is->seek_req = 1;
1097
    }
1098
}
1099

    
1100
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: add the wall time spent paused to frame_timer so
           frame scheduling continues where it left off */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honored av_read_pause(), so re-anchor the
               current video pts to "now" */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* recompute the drift so get_video_clock() resumes smoothly */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112

    
1113
/* Compute the wall-clock time at which the frame with pts
 * frame_current_pts should be displayed, advancing is->frame_timer by
 * an A/V-sync-adjusted inter-frame delay. Returns the new frame_timer. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    /* diff is initialized: it was previously read uninitialized by the
       DEBUG_SYNC printf when video is the master clock */
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fixed: the old message referenced 'actual_delay', which is not
       declared in this function and broke the build when DEBUG_SYNC
       was defined */
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1153

    
1154
/* called to display each frame */
/* Handler for FF_REFRESH_EVENT: shows the next due picture (dropping
 * late ones when framedrop is enabled), expires old subtitles, and
 * prints the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: wait for a later refresh event */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* determine when the picture AFTER this one is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* we are already past the next frame's deadline: raise the
               skip factor and, if possible, drop this picture entirely */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the subtitle once its display window has
                           passed or the next subtitle is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        /* print the status line at most every 30ms */
        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292

    
1293
/* Stop the worker threads, free all queued pictures and sync
 * primitives, then free the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* without avfilter the swscale context is owned here */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1326

    
1327
/* Global teardown: close the current stream, release option/filter
 * state and SDL, then terminate the process. Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");    /* terminate the \r status line cleanly */
    SDL_Quit();
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1343

    
1344
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Runs in the main thread in response to FF_ALLOC_EVENT posted by
 * queue_picture(); (re)creates the SDL YUV overlay for the picture at
 * the write index and signals the waiting decoder thread. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before resizing */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake queue_picture(), which is blocked waiting for 'allocated' */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1387

    
1388
/**
 * Queue one decoded frame into the picture ring buffer, converting it
 * into the SDL YUV overlay of the target VideoPicture. Blocks while
 * the queue is full; the overlay allocation itself is delegated to the
 * main thread via FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted while waiting
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full and no refresh pending: raise the frame-skip factor
       so the decoder drops frames to catch up */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores the V plane before U, hence the
           swapped indices for planes 1 and 2 */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: convert to YUV420P with swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1509

    
1510
/**
1511
 * compute the exact PTS for the picture if it is omitted in the stream
1512
 * @param pts1 the dts of the pkt / pts of the frame
1513
 */
1514
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1515
{
1516
    double frame_delay, pts;
1517

    
1518
    pts = pts1;
1519

    
1520
    if (pts != 0) {
1521
        /* update video clock with pts, if present */
1522
        is->video_clock = pts;
1523
    } else {
1524
        pts = is->video_clock;
1525
    }
1526
    /* update video clock for next frame */
1527
    frame_delay = av_q2d(is->video_st->codec->time_base);
1528
    /* for MPEG2, the frame can be repeated, so we update the
1529
       clock accordingly */
1530
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1531
    is->video_clock += frame_delay;
1532

    
1533
#if defined(DEBUG_SYNC) && 0
1534
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1535
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1536
#endif
1537
    return queue_picture(is, src_frame, pts, pos);
1538
}
1539

    
1540
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a displayable frame is in *frame (timestamp in *pts),
 * 0 when no frame was produced (flush packet, decoder delay, or frame
 * dropped for catch-up), and -1 when the queue was aborted. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* a flush packet marks a seek: reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait for the display side to drain the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            init_pts_correction(&is->pts_ctx);
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* choose the timestamp source based on decoder_reorder_pts:
               -1 = heuristic, 1 = reordered pts, 0 = packet dts */
            if (decoder_reorder_pts == -1) {
                *pts = guess_correct_pts(&is->pts_ctx, frame->reordered_opaque, pkt->dts);
            } else if (decoder_reorder_pts) {
                *pts = frame->reordered_opaque;
            } else {
                *pts = pkt->dts;
            }

            if (*pts == AV_NOPTS_VALUE) {
                *pts = 0;
            }
        }

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame-skip accounting: only pass a frame on when the skip
           counter catches up with the skip factor */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1603

    
1604
#if CONFIG_AVFILTER
1605
/* private state of ffplay's filter-graph source (see input_* below) */
typedef struct {
    VideoState *is;     ///< owning player state
    AVFrame *frame;     ///< scratch frame filled by the decoder
    int use_dr1;        ///< non-zero when direct-rendering callbacks are installed
} FilterPriv;
1610

    
1611
/* get_buffer callback: serve decoder buffers straight from the filter
 * graph so decoded frames need no extra copy (direct rendering). The
 * AVFilterBufferRef is stashed in pic->opaque. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* allocate with alignment padding plus an edge border on every
       side, unless the codec emulates edges itself */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* planes 1 and 2 are chroma and may be subsampled */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance past the edge border to the visible picture area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1654

    
1655
/* release_buffer callback: clear the data pointers so the frame cannot
 * be used after release, then drop the filter buffer reference stored
 * by input_get_buffer(). */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1660

    
1661
/* reget_buffer callback: reuse the existing filter buffer when the
 * picture properties are unchanged; allocate when nothing is held yet;
 * fail when the dimensions or pixel format differ */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (!pic->data[0]) {
        /* nothing allocated yet: fall back to a regular get_buffer */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if (codec->width   != ref->video->w ||
        codec->height  != ref->video->h ||
        codec->pix_fmt != ref->format) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1679

    
1680
/* init callback for the source filter: opaque must be the VideoState.
 * Hooks the video decoder up to the filter graph and installs the
 * direct-rendering callbacks when the codec supports DR1. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        /* decoder can render directly into buffers we provide */
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    /* scratch frame, freed in input_uninit() */
    priv->frame = avcodec_alloc_frame();

    return 0;
}
1700

    
1701
/* uninit callback: free the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1706

    
1707
/* request_frame callback: decode packets until one displayable frame
 * is produced, wrap it in a buffer reference and push it down the
 * output link. Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 for packets that yield no frame
       (flush/skip); free those and keep pulling */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* direct rendering: the data already lives in a filter buffer
           (stored in frame->opaque by input_get_buffer); just add a
           reference with all permissions */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* copy the decoded frame into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    /* NOTE(review): pkt.pos is read after av_free_packet(); presumably
       safe because only the payload is released — confirm */
    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1739

    
1740
/* restrict the source filter's output to the decoder's native pixel
 * format, so no implicit conversion happens at the graph entry */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1750

    
1751
/* propagate the decoder's dimensions and the stream time base to the
 * source filter's output link */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1762

    
1763
/* the "source" end of ffplay's filter graph: pulls decoded frames
 * from the video decoder and injects them into libavfilter */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* no inputs: this is a pure source */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1781

    
1782
/* end_frame callback of the sink: intentionally empty — frames are
 * pulled out of the graph by get_filtered_video_frame() via cur_buf
 * rather than being pushed onward */
static void output_end_frame(AVFilterLink *link)
{
}
1785

    
1786
/* the sink accepts only YUV420P, matching the SDL YV12 overlay used
 * for display */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1793

    
1794
/* Pull one frame out of the filter graph and expose it through an
 * AVFrame wrapper (the buffer ref is kept in frame->opaque; the caller
 * owns it). Returns 1 on success, -1 when no frame is available. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, AVRational *tb, int64_t *pos)
{
    AVFilterBufferRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_buf))
        return -1;
    /* take ownership of the buffer reference */
    ctx->inputs[0]->cur_buf = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;
    *tb           = ctx->inputs[0]->time_base;

    /* alias the buffer's planes; no pixel copy is made */
    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1815

    
1816
/* the "sink" end of ffplay's filter graph; frames are pulled from it
 * with get_filtered_video_frame() rather than pushed downstream */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no outputs: this is a pure sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1829
#endif  /* CONFIG_AVFILTER */
1830

    
1831
static int video_thread(void *arg)
1832
{
1833
    VideoState *is = arg;
1834
    AVFrame *frame= avcodec_alloc_frame();
1835
    int64_t pts_int;
1836
    double pts;
1837
    int ret;
1838

    
1839
#if CONFIG_AVFILTER
1840
    int64_t pos;
1841
    char sws_flags_str[128];
1842
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1843
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1844
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1845
    graph->scale_sws_opts = av_strdup(sws_flags_str);
1846

    
1847
    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
1848
    if (avfilter_open(&filt_out, &output_filter, "out") < 0) goto the_end;
1849

    
1850
    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1851
    if(avfilter_init_filter(filt_out, NULL, NULL))           goto the_end;
1852

    
1853

    
1854
    if(vfilters) {
1855
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1856
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1857

    
1858
        outputs->name    = av_strdup("in");
1859
        outputs->filter  = filt_src;
1860
        outputs->pad_idx = 0;
1861
        outputs->next    = NULL;
1862

    
1863
        inputs->name    = av_strdup("out");
1864
        inputs->filter  = filt_out;
1865
        inputs->pad_idx = 0;
1866
        inputs->next    = NULL;
1867

    
1868
        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1869
            goto the_end;
1870
        av_freep(&vfilters);
1871
    } else {
1872
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1873
    }
1874
    avfilter_graph_add_filter(graph, filt_src);
1875
    avfilter_graph_add_filter(graph, filt_out);
1876

    
1877
    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1878
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1879
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1880

    
1881
    is->out_video_filter = filt_out;
1882
#endif
1883

    
1884
    for(;;) {
1885
#if !CONFIG_AVFILTER
1886
        AVPacket pkt;
1887
#else
1888
        AVRational tb;
1889
#endif
1890
        while (is->paused && !is->videoq.abort_request)
1891
            SDL_Delay(10);
1892
#if CONFIG_AVFILTER
1893
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &tb, &pos);
1894

    
1895
        if (av_cmp_q(tb, is->video_st->time_base)) {
1896
            int64_t pts1 = pts_int;
1897
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1898
            av_log(NULL, AV_LOG_DEBUG, "video_thread(): "
1899
                   "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1900
                   tb.num, tb.den, pts1,
1901
                   is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1902
        }
1903
#else
1904
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1905
#endif
1906

    
1907
        if (ret < 0) goto the_end;
1908

    
1909
        if (!ret)
1910
            continue;
1911

    
1912
        pts = pts_int*av_q2d(is->video_st->time_base);
1913

    
1914
#if CONFIG_AVFILTER
1915
        ret = output_picture2(is, frame, pts, pos);
1916
#else
1917
        ret = output_picture2(is, frame, pts,  pkt.pos);
1918
        av_free_packet(&pkt);
1919
#endif
1920
        if (ret < 0)
1921
            goto the_end;
1922

    
1923
        if (step)
1924
            if (cur_stream)
1925
                stream_pause(cur_stream);
1926
    }
1927
 the_end:
1928
#if CONFIG_AVFILTER
1929
    avfilter_graph_destroy(graph);
1930
    av_freep(&graph);
1931
#endif
1932
    av_free(frame);
1933
    return 0;
1934
}
1935

    
1936
/* Subtitle decoding thread: drains is->subtitleq, decodes each packet and,
 * for bitmap subtitles, converts the palette from RGBA to CCIR YUVA in
 * place so it can later be blended onto the YUV video, then publishes the
 * result in the subpicture ring buffer.  Returns 0 when aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued on seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until a slot is free in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap (graphic) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert every palette entry from RGBA to CCIR YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
2010

    
2011
/* copy samples for viewing in editor window */
2012
static void update_sample_display(VideoState *is, short *samples, int samples_size)
2013
{
2014
    int size, len, channels;
2015

    
2016
    channels = is->audio_st->codec->channels;
2017

    
2018
    size = samples_size / sizeof(short);
2019
    while (size > 0) {
2020
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2021
        if (len > size)
2022
            len = size;
2023
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2024
        samples += len;
2025
        is->sample_array_index += len;
2026
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2027
            is->sample_array_index = 0;
2028
        size -= len;
2029
    }
2030
}
2031

    
2032
/* return the new audio buffer size (samples can be added or deleted
2033
   to get better sync if video or external master clock) */
2034
static int synchronize_audio(VideoState *is, short *samples,
2035
                             int samples_size1, double pts)
2036
{
2037
    int n, samples_size;
2038
    double ref_clock;
2039

    
2040
    n = 2 * is->audio_st->codec->channels;
2041
    samples_size = samples_size1;
2042

    
2043
    /* if not master, then we try to remove or add samples to correct the clock */
2044
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2045
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2046
        double diff, avg_diff;
2047
        int wanted_size, min_size, max_size, nb_samples;
2048

    
2049
        ref_clock = get_master_clock(is);
2050
        diff = get_audio_clock(is) - ref_clock;
2051

    
2052
        if (diff < AV_NOSYNC_THRESHOLD) {
2053
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2054
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2055
                /* not enough measures to have a correct estimate */
2056
                is->audio_diff_avg_count++;
2057
            } else {
2058
                /* estimate the A-V difference */
2059
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2060

    
2061
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2062
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2063
                    nb_samples = samples_size / n;
2064

    
2065
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2066
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2067
                    if (wanted_size < min_size)
2068
                        wanted_size = min_size;
2069
                    else if (wanted_size > max_size)
2070
                        wanted_size = max_size;
2071

    
2072
                    /* add or remove samples to correction the synchro */
2073
                    if (wanted_size < samples_size) {
2074
                        /* remove samples */
2075
                        samples_size = wanted_size;
2076
                    } else if (wanted_size > samples_size) {
2077
                        uint8_t *samples_end, *q;
2078
                        int nb;
2079

    
2080
                        /* add samples */
2081
                        nb = (samples_size - wanted_size);
2082
                        samples_end = (uint8_t *)samples + samples_size - n;
2083
                        q = samples_end + n;
2084
                        while (nb > 0) {
2085
                            memcpy(q, samples_end, n);
2086
                            q += n;
2087
                            nb -= n;
2088
                        }
2089
                        samples_size = wanted_size;
2090
                    }
2091
                }
2092
#if 0
2093
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2094
                       diff, avg_diff, samples_size - samples_size1,
2095
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2096
#endif
2097
            }
2098
        } else {
2099
            /* too big difference : may be initial PTS errors, so
2100
               reset A-V filter */
2101
            is->audio_diff_avg_count = 0;
2102
            is->audio_diff_cum = 0;
2103
        }
2104
    }
2105

    
2106
    return samples_size;
2107
}
2108

    
2109
/* decode one audio frame and returns its uncompressed size */
/* On success, is->audio_buf points at the decoded (and, if needed,
 * S16-converted) samples, *pts_ptr holds their presentation time, and the
 * byte count is returned.  Returns -1 when paused or aborted.  The audio
 * clock (is->audio_clock) is advanced past the returned samples. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's output format
               differs from the S16 format last configured for SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 into audio_buf2 as interleaved S16 */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2211

    
2212
/* get the current audio output buffer size, in samples. With SDL, we
2213
   cannot have a precise information */
2214
static int audio_write_get_buf_size(VideoState *is)
2215
{
2216
    return is->audio_buf_size - is->audio_buf_index;
2217
}
2218

    
2219

    
2220
/* prepare a new audio buffer */
2221
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2222
{
2223
    VideoState *is = opaque;
2224
    int audio_size, len1;
2225
    double pts;
2226

    
2227
    audio_callback_time = av_gettime();
2228

    
2229
    while (len > 0) {
2230
        if (is->audio_buf_index >= is->audio_buf_size) {
2231
           audio_size = audio_decode_frame(is, &pts);
2232
           if (audio_size < 0) {
2233
                /* if error, just output silence */
2234
               is->audio_buf = is->audio_buf1;
2235
               is->audio_buf_size = 1024;
2236
               memset(is->audio_buf, 0, is->audio_buf_size);
2237
           } else {
2238
               if (is->show_audio)
2239
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2240
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2241
                                              pts);
2242
               is->audio_buf_size = audio_size;
2243
           }
2244
           is->audio_buf_index = 0;
2245
        }
2246
        len1 = is->audio_buf_size - is->audio_buf_index;
2247
        if (len1 > len)
2248
            len1 = len;
2249
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2250
        len -= len1;
2251
        stream += len1;
2252
        is->audio_buf_index += len1;
2253
    }
2254
}
2255

    
2256
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for ic->streams[stream_index], applies the
 * global decoding options, opens SDL audio output for audio streams, and
 * spawns the video/subtitle decoding thread for those stream types.
 * Returns -1 on any failure (bad index, no decoder, SDL_OpenAudio error). */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most 2 channels */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global (command-line) decoder tuning options */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2355

    
2356
/* Close the component opened for ic->streams[stream_index]: abort and
 * drain its packet queue, join its decoding thread (video/subtitle) or
 * close SDL audio (audio), then close the codec and reset the matching
 * VideoState stream fields.  Safe no-op for an out-of-range index. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        /* release the sample-format converter, if one was created */
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2427

    
2428
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

/* Interrupt callback installed via url_set_interrupt_cb(): makes blocking
 * I/O in libavformat bail out once an abort has been requested. */
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2436

    
2437
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, selects and opens the best audio/video/
 * subtitle streams, then loops reading packets into the per-stream queues,
 * servicing pause/seek/EOF requests along the way.  On exit (error, abort
 * or autoexit) it closes all components and posts FF_QUIT_EVENT on error. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];             /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};         /* streams seen so far per media type */
    int st_best_packet_count[AVMEDIA_TYPE_NB]; /* best codec_info_nb_frames per type */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking libavformat I/O to be interrupted on abort */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* pass command-line format hints to the demuxer */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, either the user-requested stream or the one
       with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio waveform/spectrum instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network streams */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush every queue and push a flush packet so the
                   decoders reset their state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue a null packet so the video decoder flushes its
               delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* all queues drained: loop back to the start or exit */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2697

    
2698
/**
 * Allocate and initialize a VideoState for the given input and launch
 * the demuxing/decode thread.
 *
 * @param filename input URL/path (copied into the state)
 * @param iformat  forced input format, or NULL for autodetection
 * @return the new VideoState, or NULL on allocation/thread failure.
 *         On thread-creation failure the partially built state is
 *         fully released (the original code leaked the SDL mutexes
 *         and condition variables here).
 */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* fix: release the synchronization primitives created above
         * instead of leaking them with the failed state */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
/* Switch to the next usable stream of the given media type, wrapping
 * around the stream table.  Subtitle cycling additionally includes the
 * "no subtitle" state (index -1).  Closes the old component and opens
 * the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int old_index, idx;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        old_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        old_index = is->audio_stream;
    else
        old_index = is->subtitle_stream;

    /* -1 is a legal starting point only for subtitles (meaning "off") */
    if (old_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;

    idx = old_index;
    for (;;) {
        if (++idx >= is->ic->nb_streams) {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
                idx = -1;   /* past the last subtitle: turn subtitles off */
                goto the_end;
            }
            idx = 0;        /* wrap around for audio/video */
        }
        if (idx == old_index)
            return;         /* full cycle, no alternative stream found */
        st = ic->streams[idx];
        if (st->codec->codec_type != codec_type)
            continue;
        /* check that parameters are OK */
        switch (codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (st->codec->sample_rate != 0 &&
                st->codec->channels != 0)
                goto the_end;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            goto the_end;
        default:
            break;
        }
    }
 the_end:
    stream_component_close(is, old_index);
    stream_component_open(is, idx);
}
static void toggle_full_screen(void)
2777
{
2778
    is_full_screen = !is_full_screen;
2779
    if (!fs_screen_width) {
2780
        /* use default SDL method */
2781
//        SDL_WM_ToggleFullScreen(screen);
2782
    }
2783
    video_open(cur_stream);
2784
}
static void toggle_pause(void)
2787
{
2788
    if (cur_stream)
2789
        stream_pause(cur_stream);
2790
    step = 0;
2791
}
static void step_to_next_frame(void)
2794
{
2795
    if (cur_stream) {
2796
        /* if the stream is paused unpause it, then step */
2797
        if (cur_stream->paused)
2798
            stream_pause(cur_stream);
2799
    }
2800
    step = 1;
2801
}
static void toggle_audio_display(void)
2804
{
2805
    if (cur_stream) {
2806
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2807
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2808
        fill_rectangle(screen,
2809
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2810
                    bgcolor);
2811
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2812
    }
2813
}
/* handle an event sent by the GUI */
2816
static void event_loop(void)
2817
{
2818
    SDL_Event event;
2819
    double incr, pos, frac;
2820

    
2821
    for(;;) {
2822
        double x;
2823
        SDL_WaitEvent(&event);
2824
        switch(event.type) {
2825
        case SDL_KEYDOWN:
2826
            if (exit_on_keydown) {
2827
                do_exit();
2828
                break;
2829
            }
2830
            switch(event.key.keysym.sym) {
2831
            case SDLK_ESCAPE:
2832
            case SDLK_q:
2833
                do_exit();
2834
                break;
2835
            case SDLK_f:
2836
                toggle_full_screen();
2837
                break;
2838
            case SDLK_p:
2839
            case SDLK_SPACE:
2840
                toggle_pause();
2841
                break;
2842
            case SDLK_s: //S: Step to next frame
2843
                step_to_next_frame();
2844
                break;
2845
            case SDLK_a:
2846
                if (cur_stream)
2847
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2848
                break;
2849
            case SDLK_v:
2850
                if (cur_stream)
2851
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2852
                break;
2853
            case SDLK_t:
2854
                if (cur_stream)
2855
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2856
                break;
2857
            case SDLK_w:
2858
                toggle_audio_display();
2859
                break;
2860
            case SDLK_LEFT:
2861
                incr = -10.0;
2862
                goto do_seek;
2863
            case SDLK_RIGHT:
2864
                incr = 10.0;
2865
                goto do_seek;
2866
            case SDLK_UP:
2867
                incr = 60.0;
2868
                goto do_seek;
2869
            case SDLK_DOWN:
2870
                incr = -60.0;
2871
            do_seek:
2872
                if (cur_stream) {
2873
                    if (seek_by_bytes) {
2874
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2875
                            pos= cur_stream->video_current_pos;
2876
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2877
                            pos= cur_stream->audio_pkt.pos;
2878
                        }else
2879
                            pos = url_ftell(cur_stream->ic->pb);
2880
                        if (cur_stream->ic->bit_rate)
2881
                            incr *= cur_stream->ic->bit_rate / 8.0;
2882
                        else
2883
                            incr *= 180000.0;
2884
                        pos += incr;
2885
                        stream_seek(cur_stream, pos, incr, 1);
2886
                    } else {
2887
                        pos = get_master_clock(cur_stream);
2888
                        pos += incr;
2889
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2890
                    }
2891
                }
2892
                break;
2893
            default:
2894
                break;
2895
            }
2896
            break;
2897
        case SDL_MOUSEBUTTONDOWN:
2898
            if (exit_on_mousedown) {
2899
                do_exit();
2900
                break;
2901
            }
2902
        case SDL_MOUSEMOTION:
2903
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2904
                x= event.button.x;
2905
            }else{
2906
                if(event.motion.state != SDL_PRESSED)
2907
                    break;
2908
                x= event.motion.x;
2909
            }
2910
            if (cur_stream) {
2911
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2912
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2913
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2914
                }else{
2915
                    int64_t ts;
2916
                    int ns, hh, mm, ss;
2917
                    int tns, thh, tmm, tss;
2918
                    tns = cur_stream->ic->duration/1000000LL;
2919
                    thh = tns/3600;
2920
                    tmm = (tns%3600)/60;
2921
                    tss = (tns%60);
2922
                    frac = x/cur_stream->width;
2923
                    ns = frac*tns;
2924
                    hh = ns/3600;
2925
                    mm = (ns%3600)/60;
2926
                    ss = (ns%60);
2927
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2928
                            hh, mm, ss, thh, tmm, tss);
2929
                    ts = frac*cur_stream->ic->duration;
2930
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2931
                        ts += cur_stream->ic->start_time;
2932
                    stream_seek(cur_stream, ts, 0, 0);
2933
                }
2934
            }
2935
            break;
2936
        case SDL_VIDEORESIZE:
2937
            if (cur_stream) {
2938
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2939
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2940
                screen_width = cur_stream->width = event.resize.w;
2941
                screen_height= cur_stream->height= event.resize.h;
2942
            }
2943
            break;
2944
        case SDL_QUIT:
2945
        case FF_QUIT_EVENT:
2946
            do_exit();
2947
            break;
2948
        case FF_ALLOC_EVENT:
2949
            video_open(event.user.data1);
2950
            alloc_picture(event.user.data1);
2951
            break;
2952
        case FF_REFRESH_EVENT:
2953
            video_refresh_timer(event.user.data1);
2954
            cur_stream->refresh=0;
2955
            break;
2956
        default:
2957
            break;
2958
        }
2959
    }
2960
}
static void opt_frame_size(const char *arg)
2963
{
2964
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2965
        fprintf(stderr, "Incorrect frame size\n");
2966
        exit(1);
2967
    }
2968
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2969
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2970
        exit(1);
2971
    }
2972
}
static int opt_width(const char *opt, const char *arg)
2975
{
2976
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2977
    return 0;
2978
}
static int opt_height(const char *opt, const char *arg)
2981
{
2982
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2983
    return 0;
2984
}
static void opt_format(const char *arg)
2987
{
2988
    file_iformat = av_find_input_format(arg);
2989
    if (!file_iformat) {
2990
        fprintf(stderr, "Unknown input format: %s\n", arg);
2991
        exit(1);
2992
    }
2993
}
static void opt_frame_pix_fmt(const char *arg)
2996
{
2997
    frame_pix_fmt = av_get_pix_fmt(arg);
2998
}
static int opt_sync(const char *opt, const char *arg)
3001
{
3002
    if (!strcmp(arg, "audio"))
3003
        av_sync_type = AV_SYNC_AUDIO_MASTER;
3004
    else if (!strcmp(arg, "video"))
3005
        av_sync_type = AV_SYNC_VIDEO_MASTER;
3006
    else if (!strcmp(arg, "ext"))
3007
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3008
    else {
3009
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3010
        exit(1);
3011
    }
3012
    return 0;
3013
}
static int opt_seek(const char *opt, const char *arg)
3016
{
3017
    start_time = parse_time_or_die(opt, arg, 1);
3018
    return 0;
3019
}
static int opt_duration(const char *opt, const char *arg)
3022
{
3023
    duration = parse_time_or_die(opt, arg, 1);
3024
    return 0;
3025
}
static int opt_debug(const char *opt, const char *arg)
3028
{
3029
    av_log_set_level(99);
3030
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3031
    return 0;
3032
}
static int opt_vismv(const char *opt, const char *arg)
3035
{
3036
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3037
    return 0;
3038
}
static int opt_thread_count(const char *opt, const char *arg)
3041
{
3042
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3043
#if !HAVE_THREADS
3044
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3045
#endif
3046
    return 0;
3047
}
/* Command-line option table.  Generic options (help, version, ...)
 * come from cmdutils_common_opts.h.  Function-pointer initializers are
 * written uniformly without '&' (function designators decay anyway). */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    /* input/decoding */
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
static void show_help(void)
3104
{
3105
    av_log_set_callback(log_callback_help);
3106
    show_usage();
3107
    show_help_options(options, "Main options:\n",
3108
                      OPT_EXPERT, 0);
3109
    show_help_options(options, "\nAdvanced options:\n",
3110
                      OPT_EXPERT, OPT_EXPERT);
3111
    printf("\n");
3112
    av_opt_show2(avcodec_opts[0], NULL,
3113
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3114
    printf("\n");
3115
    av_opt_show2(avformat_opts, NULL,
3116
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3117
#if !CONFIG_AVFILTER
3118
    printf("\n");
3119
    av_opt_show2(sws_opts, NULL,
3120
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
3121
#endif
3122
    printf("\nWhile playing:\n"
3123
           "q, ESC              quit\n"
3124
           "f                   toggle full screen\n"
3125
           "p, SPC              pause\n"
3126
           "a                   cycle audio channel\n"
3127
           "v                   cycle video channel\n"
3128
           "t                   cycle subtitle channel\n"
3129
           "w                   show audio waves\n"
3130
           "s                   activate frame-step mode\n"
3131
           "left/right          seek backward/forward 10 seconds\n"
3132
           "down/up             seek backward/forward 1 minute\n"
3133
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3134
           );
3135
}
static void opt_input_file(const char *filename)
3138
{
3139
    if (input_filename) {
3140
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3141
                filename, input_filename);
3142
        exit(1);
3143
    }
3144
    if (!strcmp(filename, "-"))
3145
        filename = "pipe:";
3146
    input_filename = filename;
3147
}
/* Called from the main */
3150
int main(int argc, char **argv)
3151
{
3152
    int flags;
3153

    
3154
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
3155

    
3156
    /* register all codecs, demux and protocols */
3157
    avcodec_register_all();
3158
#if CONFIG_AVDEVICE
3159
    avdevice_register_all();
3160
#endif
3161
#if CONFIG_AVFILTER
3162
    avfilter_register_all();
3163
#endif
3164
    av_register_all();
3165

    
3166
    init_opts();
3167

    
3168
    show_banner();
3169

    
3170
    parse_options(argc, argv, options, opt_input_file);
3171

    
3172
    if (!input_filename) {
3173
        show_usage();
3174
        fprintf(stderr, "An input file must be specified\n");
3175
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3176
        exit(1);
3177
    }
3178

    
3179
    if (display_disable) {
3180
        video_disable = 1;
3181
    }
3182
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3183
#if !defined(__MINGW32__) && !defined(__APPLE__)
3184
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3185
#endif
3186
    if (SDL_Init (flags)) {
3187
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3188
        exit(1);
3189
    }
3190

    
3191
    if (!display_disable) {
3192
#if HAVE_SDL_VIDEO_SIZE
3193
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3194
        fs_screen_width = vi->current_w;
3195
        fs_screen_height = vi->current_h;
3196
#endif
3197
    }
3198

    
3199
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3200
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3201
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3202

    
3203
    av_init_packet(&flush_pkt);
3204
    flush_pkt.data= "FLUSH";
3205

    
3206
    cur_stream = stream_open(input_filename, file_iformat);
3207

    
3208
    event_loop();
3209

    
3210
    /* never returns */
3211

    
3212
    return 0;
3213
}