/*
 * FFplay : Simple Media Player based on the FFmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _XOPEN_SOURCE 600

#include "config.h"
#include <inttypes.h>
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/colorspace.h"
#include "libavutil/pixdesc.h"
#include "libavcore/imgutils.h"
#include "libavcore/parseutils.h"
#include "libavcore/samplefmt.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/opt.h"
#include "libavcodec/avfft.h"

#if CONFIG_AVFILTER
# include "libavfilter/avfilter.h"
# include "libavfilter/avfiltergraph.h"
#endif

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#include <unistd.h>
#include <assert.h>

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG
//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

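/* Thread-safe FIFO of demuxed packets, shared between the demuxer thread and
   the audio/video/subtitle decoder threads. 'size' counts queued bytes so the
   demuxer can stop reading once the queues are full enough. */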
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;
#endif
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

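/* Global state for one open media file: demuxer context, per-stream decoding
   state, the reference clocks (audio, video, external) and the picture and
   subtitle queues that feed the display. */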
typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    SDL_Thread *refresh_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

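/* Append a packet to the queue. The packet's data is duplicated (except for
   the special flush packet), the byte and packet counters are updated and one
   waiting reader is woken up. Returns 0 on success, -1 on failure. */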
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

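/* Alpha-blend one palettized subtitle rectangle (PAL8 data with a palette
   already converted to YUVA) onto the YUV420P destination picture. Luma is
   blended per pixel; chroma is blended per 2x2 block, which is why the odd
   first/last row and odd column cases are handled separately. */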
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}

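/* Display the picture at the read index of the picture queue: blend any
   subtitle whose start time has been reached, compute the display rectangle
   from the sample aspect ratio and centre it in the window. */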
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we assume the screen has a 1.0 pixel aspect ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

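/* Mathematical modulo: unlike C's %, the result is always in [0, b). */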
static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

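/* Audio visualization. In waveform mode (show_audio == 1) one oscilloscope
   trace is drawn per channel; otherwise one RDFT spectrogram column is drawn
   per refresh. The display index is pushed back by the amount of audio still
   buffered so the picture stays centred on the samples currently audible. */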
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index: center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but it's more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}

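/* (Re)create the SDL video surface. The size is taken from the fullscreen
   mode, an explicit size request, the filter graph output or the codec,
   in that order, falling back to 640x480. */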
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

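/* Background thread that periodically pushes FF_REFRESH_EVENT to the main
   event loop, which then calls video_refresh_timer(). */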
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;
            SDL_PushEvent(&event);
        }
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
    }
    return 0;
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}

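/* Compute the absolute time (in the frame_timer time base) at which the frame
   with the given pts should be displayed. When video is not the master clock,
   the nominal inter-frame delay is dropped or doubled to pull the video clock
   back towards the master clock. */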
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the queue
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

1301
static void stream_close(VideoState *is)
1302
{
1303
    VideoPicture *vp;
1304
    int i;
1305
    /* XXX: use a special url_shutdown call to abort parse cleanly */
1306
    is->abort_request = 1;
1307
    SDL_WaitThread(is->parse_tid, NULL);
1308
    SDL_WaitThread(is->refresh_tid, NULL);
1309

    
1310
    /* free all pictures */
1311
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1312
        vp = &is->pictq[i];
1313
#if CONFIG_AVFILTER
1314
        if (vp->picref) {
1315
            avfilter_unref_buffer(vp->picref);
1316
            vp->picref = NULL;
1317
        }
1318
#endif
1319
        if (vp->bmp) {
1320
            SDL_FreeYUVOverlay(vp->bmp);
1321
            vp->bmp = NULL;
1322
        }
1323
    }
1324
    SDL_DestroyMutex(is->pictq_mutex);
1325
    SDL_DestroyCond(is->pictq_cond);
1326
    SDL_DestroyMutex(is->subpq_mutex);
1327
    SDL_DestroyCond(is->subpq_cond);
1328
#if !CONFIG_AVFILTER
1329
    if (is->img_convert_ctx)
1330
        sws_freeContext(is->img_convert_ctx);
1331
#endif
1332
    av_free(is);
1333
}
1334

    
1335
static void do_exit(void)
1336
{
1337
    if (cur_stream) {
1338
        stream_close(cur_stream);
1339
        cur_stream = NULL;
1340
    }
1341
    uninit_opts();
1342
#if CONFIG_AVFILTER
1343
    avfilter_uninit();
1344
#endif
1345
    if (show_status)
1346
        printf("\n");
1347
    SDL_Quit();
1348
    av_log(NULL, AV_LOG_QUIET, "");
1349
    exit(0);
1350
}
1351

    
1352
/* allocate a picture (needs to do that in main thread to avoid
1353
   potential locking problems */
1354
static void alloc_picture(void *opaque)
1355
{
1356
    VideoState *is = opaque;
1357
    VideoPicture *vp;
1358

    
1359
    vp = &is->pictq[is->pictq_windex];
1360

    
1361
    if (vp->bmp)
1362
        SDL_FreeYUVOverlay(vp->bmp);
1363

    
1364
#if CONFIG_AVFILTER
1365
    if (vp->picref)
1366
        avfilter_unref_buffer(vp->picref);
1367
    vp->picref = NULL;
1368

    
1369
    vp->width   = is->out_video_filter->inputs[0]->w;
1370
    vp->height  = is->out_video_filter->inputs[0]->h;
1371
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1372
#else
1373
    vp->width   = is->video_st->codec->width;
1374
    vp->height  = is->video_st->codec->height;
1375
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1376
#endif
1377

    
1378
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1379
                                   SDL_YV12_OVERLAY,
1380
                                   screen);
1381
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1382
        /* SDL allocates a buffer smaller than requested if the video
1383
         * overlay hardware is unable to support the requested size. */
1384
        fprintf(stderr, "Error: the video system does not support an image\n"
1385
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1386
                        "to reduce the image size.\n", vp->width, vp->height );
1387
        do_exit();
1388
    }
1389

    
1390
    SDL_LockMutex(is->pictq_mutex);
1391
    vp->allocated = 1;
1392
    SDL_CondSignal(is->pictq_cond);
1393
    SDL_UnlockMutex(is->pictq_mutex);
1394
}
1395

    
1396
/**
1397
 *
1398
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1399
 */
1400
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1401
{
1402
    VideoPicture *vp;
1403
    int dst_pix_fmt;
1404
#if CONFIG_AVFILTER
1405
    AVPicture pict_src;
1406
#endif
1407
    /* wait until we have space to put a new picture */
1408
    SDL_LockMutex(is->pictq_mutex);
1409

    
1410
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1411
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1412

    
1413
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1414
           !is->videoq.abort_request) {
1415
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1416
    }
1417
    SDL_UnlockMutex(is->pictq_mutex);
1418

    
1419
    if (is->videoq.abort_request)
1420
        return -1;
1421

    
1422
    vp = &is->pictq[is->pictq_windex];
1423

    
1424
    /* alloc or resize hardware picture buffer */
1425
    if (!vp->bmp ||
1426
#if CONFIG_AVFILTER
1427
        vp->width  != is->out_video_filter->inputs[0]->w ||
1428
        vp->height != is->out_video_filter->inputs[0]->h) {
1429
#else
1430
        vp->width != is->video_st->codec->width ||
1431
        vp->height != is->video_st->codec->height) {
1432
#endif
1433
        SDL_Event event;
1434

    
1435
        vp->allocated = 0;
1436

    
1437
        /* the allocation must be done in the main thread to avoid
1438
           locking problems */
1439
        event.type = FF_ALLOC_EVENT;
1440
        event.user.data1 = is;
1441
        SDL_PushEvent(&event);
1442

    
1443
        /* wait until the picture is allocated */
1444
        SDL_LockMutex(is->pictq_mutex);
1445
        while (!vp->allocated && !is->videoq.abort_request) {
1446
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1447
        }
1448
        SDL_UnlockMutex(is->pictq_mutex);
1449

    
1450
        if (is->videoq.abort_request)
1451
            return -1;
1452
    }
1453

    
1454
    /* if the frame is not skipped, then display it */
1455
    if (vp->bmp) {
1456
        AVPicture pict;
1457
#if CONFIG_AVFILTER
1458
        if(vp->picref)
1459
            avfilter_unref_buffer(vp->picref);
1460
        vp->picref = src_frame->opaque;
1461
#endif
1462

    
1463
        /* get a pointer on the bitmap */
1464
        SDL_LockYUVOverlay (vp->bmp);
1465

    
1466
        dst_pix_fmt = PIX_FMT_YUV420P;
1467
        memset(&pict,0,sizeof(AVPicture));
1468
        pict.data[0] = vp->bmp->pixels[0];
1469
        pict.data[1] = vp->bmp->pixels[2];
1470
        pict.data[2] = vp->bmp->pixels[1];
1471

    
1472
        pict.linesize[0] = vp->bmp->pitches[0];
1473
        pict.linesize[1] = vp->bmp->pitches[2];
1474
        pict.linesize[2] = vp->bmp->pitches[1];
1475

    
1476
#if CONFIG_AVFILTER
1477
        pict_src.data[0] = src_frame->data[0];
1478
        pict_src.data[1] = src_frame->data[1];
1479
        pict_src.data[2] = src_frame->data[2];
1480

    
1481
        pict_src.linesize[0] = src_frame->linesize[0];
1482
        pict_src.linesize[1] = src_frame->linesize[1];
1483
        pict_src.linesize[2] = src_frame->linesize[2];
1484

    
1485
        //FIXME use direct rendering
1486
        av_picture_copy(&pict, &pict_src,
1487
                        vp->pix_fmt, vp->width, vp->height);
1488
#else
1489
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1490
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1491
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1492
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1493
        if (is->img_convert_ctx == NULL) {
1494
            fprintf(stderr, "Cannot initialize the conversion context\n");
1495
            exit(1);
1496
        }
1497
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1498
                  0, vp->height, pict.data, pict.linesize);
1499
#endif
1500
        /* update the bitmap content */
1501
        SDL_UnlockYUVOverlay(vp->bmp);
1502

    
1503
        vp->pts = pts;
1504
        vp->pos = pos;
1505

    
1506
        /* now we can update the picture count */
1507
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1508
            is->pictq_windex = 0;
1509
        SDL_LockMutex(is->pictq_mutex);
1510
        vp->target_clock= compute_target_time(vp->pts, is);
1511

    
1512
        is->pictq_size++;
1513
        SDL_UnlockMutex(is->pictq_mutex);
1514
    }
1515
    return 0;
1516
}
1517

    
1518
/**
1519
 * compute the exact PTS for the picture if it is omitted in the stream
1520
 * @param pts1 the dts of the pkt / pts of the frame
1521
 */
1522
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1523
{
1524
    double frame_delay, pts;
1525

    
1526
    pts = pts1;
1527

    
1528
    if (pts != 0) {
1529
        /* update video clock with pts, if present */
1530
        is->video_clock = pts;
1531
    } else {
1532
        pts = is->video_clock;
1533
    }
1534
    /* update video clock for next frame */
1535
    frame_delay = av_q2d(is->video_st->codec->time_base);
1536
    /* for MPEG2, the frame can be repeated, so we update the
1537
       clock accordingly */
1538
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1539
    is->video_clock += frame_delay;
1540

    
1541
#if defined(DEBUG_SYNC) && 0
1542
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1543
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1544
#endif
1545
    return queue_picture(is, src_frame, pts, pos);
1546
}
1547

    
1548
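/* Pull the next packet from the video queue and decode it. A flush packet
   resets the decoder, empties the picture queue and re-bases the frame timer.
   For a decoded frame the pts source is chosen according to
   decoder_reorder_pts (best_effort_timestamp, pkt_pts or pkt_dts) and frame
   skipping is applied. Returns 1 if the frame should be displayed, 0
   otherwise, -1 on abort. */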
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}

#if CONFIG_AVFILTER
typedef struct {
    VideoState *is;
    AVFrame *frame;
    int use_dr1;
} FilterPriv;

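/* libavfilter direct-rendering callbacks: the decoder is handed buffers that
   are really AVFilterBufferRefs, so decoded frames can be pushed into the
   filter graph without an extra copy. */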
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}

static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}

static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}

static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    priv->frame = avcodec_alloc_frame();
1705

    
1706
    return 0;
1707
}
1708

    
1709
static void input_uninit(AVFilterContext *ctx)
1710
{
1711
    FilterPriv *priv = ctx->priv;
1712
    av_free(priv->frame);
1713
}
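/* Source pad callback: pull one decoded frame from the video packet queue and
   push it into the graph, either by re-referencing the decoder's DR1 buffer
   or by copying the frame data into a newly allocated buffer. */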
1714

    
1715
static int input_request_frame(AVFilterLink *link)
1716
{
1717
    FilterPriv *priv = link->src->priv;
1718
    AVFilterBufferRef *picref;
1719
    int64_t pts = 0;
1720
    AVPacket pkt;
1721
    int ret;
1722

    
1723
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1724
        av_free_packet(&pkt);
1725
    if (ret < 0)
1726
        return -1;
1727

    
1728
    if(priv->use_dr1) {
1729
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1730
    } else {
1731
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1732
        av_image_copy(picref->data, picref->linesize,
1733
                      priv->frame->data, priv->frame->linesize,
1734
                      picref->format, link->w, link->h);
1735
    }
1736
    av_free_packet(&pkt);
1737

    
1738
    picref->pts = pts;
1739
    picref->pos = pkt.pos;
1740
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1741
    avfilter_start_frame(link, picref);
1742
    avfilter_draw_slice(link, 0, link->h, 1);
1743
    avfilter_end_frame(link);
1744

    
1745
    return 0;
1746
}
1747

    
1748
static int input_query_formats(AVFilterContext *ctx)
1749
{
1750
    FilterPriv *priv = ctx->priv;
1751
    enum PixelFormat pix_fmts[] = {
1752
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1753
    };
1754

    
1755
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1756
    return 0;
1757
}
1758

    
1759
static int input_config_props(AVFilterLink *link)
1760
{
1761
    FilterPriv *priv  = link->src->priv;
1762
    AVCodecContext *c = priv->is->video_st->codec;
1763

    
1764
    link->w = c->width;
1765
    link->h = c->height;
1766
    link->time_base = priv->is->video_st->time_base;
1767

    
1768
    return 0;
1769
}
1770

    
1771
static AVFilter input_filter =
1772
{
1773
    .name      = "ffplay_input",
1774

    
1775
    .priv_size = sizeof(FilterPriv),
1776

    
1777
    .init      = input_init,
1778
    .uninit    = input_uninit,
1779

    
1780
    .query_formats = input_query_formats,
1781

    
1782
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1783
    .outputs   = (AVFilterPad[]) {{ .name = "default",
1784
                                    .type = AVMEDIA_TYPE_VIDEO,
1785
                                    .request_frame = input_request_frame,
1786
                                    .config_props  = input_config_props, },
1787
                                  { .name = NULL }},
1788
};
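/* Build the filter graph: ffplay_input -> [optional user filter chain from the
   -vf option] -> ffsink.  Without -vf the source is linked directly to the
   sink. */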
1789

    
1790
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1791
{
1792
    char sws_flags_str[128];
1793
    int ret;
1794
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1795
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1796
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1797
    graph->scale_sws_opts = av_strdup(sws_flags_str);
1798

    
1799
    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1800
                                            NULL, is, graph)) < 0)
1801
        goto the_end;
1802
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1803
                                            NULL, &ffsink_ctx, graph)) < 0)
1804
        goto the_end;
1805

    
1806
    if(vfilters) {
1807
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1808
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1809

    
1810
        outputs->name    = av_strdup("in");
1811
        outputs->filter_ctx = filt_src;
1812
        outputs->pad_idx = 0;
1813
        outputs->next    = NULL;
1814

    
1815
        inputs->name    = av_strdup("out");
1816
        inputs->filter_ctx = filt_out;
1817
        inputs->pad_idx = 0;
1818
        inputs->next    = NULL;
1819

    
1820
        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1821
            goto the_end;
1822
        av_freep(&vfilters);
1823
    } else {
1824
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1825
            goto the_end;
1826
    }
1827

    
1828
    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1829
        goto the_end;
1830

    
1831
    is->out_video_filter = filt_out;
1832
the_end:
1833
    return ret;
1834
}
1835

    
1836
#endif  /* CONFIG_AVFILTER */
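/* Video decoding thread: fetch decoded frames (through the filter graph when
   compiled with libavfilter), rescale their PTS to the stream time base and
   hand them to the picture queue via output_picture2(). */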
1837

    
1838
static int video_thread(void *arg)
1839
{
1840
    VideoState *is = arg;
1841
    AVFrame *frame= avcodec_alloc_frame();
1842
    int64_t pts_int;
1843
    double pts;
1844
    int ret;
1845

    
1846
#if CONFIG_AVFILTER
1847
    AVFilterGraph *graph = avfilter_graph_alloc();
1848
    AVFilterContext *filt_out = NULL;
1849
    int64_t pos;
1850

    
1851
    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1852
        goto the_end;
1853
    filt_out = is->out_video_filter;
1854
#endif
1855

    
1856
    for(;;) {
1857
#if !CONFIG_AVFILTER
1858
        AVPacket pkt;
1859
#else
1860
        AVFilterBufferRef *picref;
1861
        AVRational tb;
1862
#endif
1863
        while (is->paused && !is->videoq.abort_request)
1864
            SDL_Delay(10);
1865
#if CONFIG_AVFILTER
1866
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1867
        if (picref) {
1868
            pts_int = picref->pts;
1869
            pos     = picref->pos;
1870
            frame->opaque = picref;
1871
        }
1872

    
1873
        if (av_cmp_q(tb, is->video_st->time_base)) {
1874
            av_unused int64_t pts1 = pts_int;
1875
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1876
            av_dlog(NULL, "video_thread(): "
1877
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1878
                    tb.num, tb.den, pts1,
1879
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1880
        }
1881
#else
1882
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1883
#endif
1884

    
1885
        if (ret < 0) goto the_end;
1886

    
1887
        if (!ret)
1888
            continue;
1889

    
1890
        pts = pts_int*av_q2d(is->video_st->time_base);
1891

    
1892
#if CONFIG_AVFILTER
1893
        ret = output_picture2(is, frame, pts, pos);
1894
#else
1895
        ret = output_picture2(is, frame, pts,  pkt.pos);
1896
        av_free_packet(&pkt);
1897
#endif
1898
        if (ret < 0)
1899
            goto the_end;
1900

    
1901
        if (step)
1902
            if (cur_stream)
1903
                stream_pause(cur_stream);
1904
    }
1905
 the_end:
1906
#if CONFIG_AVFILTER
1907
    avfilter_graph_free(&graph);
1908
#endif
1909
    av_free(frame);
1910
    return 0;
1911
}
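/* Subtitle decoding thread: decode subtitle packets and convert each palette
   entry of bitmap subtitles from RGBA to YUVA so they can later be blended
   onto the YUV pictures. */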
1912

    
1913
static int subtitle_thread(void *arg)
1914
{
1915
    VideoState *is = arg;
1916
    SubPicture *sp;
1917
    AVPacket pkt1, *pkt = &pkt1;
1918
    int len1, got_subtitle;
1919
    double pts;
1920
    int i, j;
1921
    int r, g, b, y, u, v, a;
1922

    
1923
    for(;;) {
1924
        while (is->paused && !is->subtitleq.abort_request) {
1925
            SDL_Delay(10);
1926
        }
1927
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1928
            break;
1929

    
1930
        if(pkt->data == flush_pkt.data){
1931
            avcodec_flush_buffers(is->subtitle_st->codec);
1932
            continue;
1933
        }
1934
        SDL_LockMutex(is->subpq_mutex);
1935
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1936
               !is->subtitleq.abort_request) {
1937
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1938
        }
1939
        SDL_UnlockMutex(is->subpq_mutex);
1940

    
1941
        if (is->subtitleq.abort_request)
1942
            goto the_end;
1943

    
1944
        sp = &is->subpq[is->subpq_windex];
1945

    
1946
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1947
           this packet, if any */
1948
        pts = 0;
1949
        if (pkt->pts != AV_NOPTS_VALUE)
1950
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1951

    
1952
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1953
                                    &sp->sub, &got_subtitle,
1954
                                    pkt);
1955
//            if (len1 < 0)
1956
//                break;
1957
        if (got_subtitle && sp->sub.format == 0) {
1958
            sp->pts = pts;
1959

    
1960
            for (i = 0; i < sp->sub.num_rects; i++)
1961
            {
1962
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1963
                {
1964
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1965
                    y = RGB_TO_Y_CCIR(r, g, b);
1966
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1967
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1968
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1969
                }
1970
            }
1971

    
1972
            /* now we can update the picture count */
1973
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1974
                is->subpq_windex = 0;
1975
            SDL_LockMutex(is->subpq_mutex);
1976
            is->subpq_size++;
1977
            SDL_UnlockMutex(is->subpq_mutex);
1978
        }
1979
        av_free_packet(pkt);
1980
//        if (step)
1981
//            if (cur_stream)
1982
//                stream_pause(cur_stream);
1983
    }
1984
 the_end:
1985
    return 0;
1986
}
1987

    
1988
/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}
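/* Example of the correction below, assuming 44100 Hz stereo S16 (n = 4):
   for diff = +0.02 s, wanted_size = samples_size + (int)(0.02 * 44100) * 4,
   i.e. samples_size + 3528 bytes, then clamped to within
   SAMPLE_CORRECTION_PERCENT_MAX percent of the original buffer size. */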
2008

    
2009
/* return the new audio buffer size (samples can be added or deleted
   to get better sync when video or an external clock is the master) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the final sample */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* difference is too big: may be caused by initial PTS errors, so
               reset the A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
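/* The decoder below may return audio in any sample format; when it is not
   signed 16-bit it is converted with av_audio_convert() so the rest of the
   player can keep assuming interleaved S16 (2 bytes per channel sample). */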
2085

    
2086
/* decode one audio frame and return its uncompressed size */
2087
static int audio_decode_frame(VideoState *is, double *pts_ptr)
2088
{
2089
    AVPacket *pkt_temp = &is->audio_pkt_temp;
2090
    AVPacket *pkt = &is->audio_pkt;
2091
    AVCodecContext *dec= is->audio_st->codec;
2092
    int n, len1, data_size;
2093
    double pts;
2094

    
2095
    for(;;) {
2096
        /* NOTE: the audio packet can contain several frames */
2097
        while (pkt_temp->size > 0) {
2098
            data_size = sizeof(is->audio_buf1);
2099
            len1 = avcodec_decode_audio3(dec,
2100
                                        (int16_t *)is->audio_buf1, &data_size,
2101
                                        pkt_temp);
2102
            if (len1 < 0) {
2103
                /* if error, we skip the frame */
2104
                pkt_temp->size = 0;
2105
                break;
2106
            }
2107

    
2108
            pkt_temp->data += len1;
2109
            pkt_temp->size -= len1;
2110
            if (data_size <= 0)
2111
                continue;
2112

    
2113
            if (dec->sample_fmt != is->audio_src_fmt) {
2114
                if (is->reformat_ctx)
2115
                    av_audio_convert_free(is->reformat_ctx);
2116
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2117
                                                         dec->sample_fmt, 1, NULL, 0);
2118
                if (!is->reformat_ctx) {
2119
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2120
                        av_get_sample_fmt_name(dec->sample_fmt),
2121
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2122
                        break;
2123
                }
2124
                is->audio_src_fmt= dec->sample_fmt;
2125
            }
2126

    
2127
            if (is->reformat_ctx) {
2128
                const void *ibuf[6]= {is->audio_buf1};
2129
                void *obuf[6]= {is->audio_buf2};
2130
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2131
                int ostride[6]= {2};
2132
                int len= data_size/istride[0];
2133
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2134
                    printf("av_audio_convert() failed\n");
2135
                    break;
2136
                }
2137
                is->audio_buf= is->audio_buf2;
2138
                /* FIXME: existing code assumes that data_size equals framesize*channels*2;
                          remove this legacy cruft */
2140
                data_size= len*2;
2141
            }else{
2142
                is->audio_buf= is->audio_buf1;
2143
            }
2144

    
2145
            /* if no pts, then compute it */
2146
            pts = is->audio_clock;
2147
            *pts_ptr = pts;
2148
            n = 2 * dec->channels;
2149
            is->audio_clock += (double)data_size /
2150
                (double)(n * dec->sample_rate);
2151
#if defined(DEBUG_SYNC)
2152
            {
2153
                static double last_clock;
2154
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2155
                       is->audio_clock - last_clock,
2156
                       is->audio_clock, pts);
2157
                last_clock = is->audio_clock;
2158
            }
2159
#endif
2160
            return data_size;
2161
        }
2162

    
2163
        /* free the current packet */
2164
        if (pkt->data)
2165
            av_free_packet(pkt);
2166

    
2167
        if (is->paused || is->audioq.abort_request) {
2168
            return -1;
2169
        }
2170

    
2171
        /* read next packet */
2172
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2173
            return -1;
2174
        if(pkt->data == flush_pkt.data){
2175
            avcodec_flush_buffers(dec);
2176
            continue;
2177
        }
2178

    
2179
        pkt_temp->data = pkt->data;
2180
        pkt_temp->size = pkt->size;
2181

    
2182
        /* update the audio clock with the pts, if present */
2183
        if (pkt->pts != AV_NOPTS_VALUE) {
2184
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2185
        }
2186
    }
2187
}
2188

    
2189
/* get the current audio output buffer size, in samples. With SDL, we
   cannot get precise information about it */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
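/* The callback below runs on SDL's audio thread.  It must always fill 'len'
   bytes of 'stream'; on decode errors it outputs a short block of silence so
   playback does not stall. */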
2195

    
2196

    
2197
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
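/* Per-type setup performed by stream_component_open(): audio streams open the
   SDL audio device and start the callback above, while video and subtitle
   streams spawn their own decoding threads. */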
2232

    
2233
/* open a given stream. Return 0 if OK */
2234
static int stream_component_open(VideoState *is, int stream_index)
2235
{
2236
    AVFormatContext *ic = is->ic;
2237
    AVCodecContext *avctx;
2238
    AVCodec *codec;
2239
    SDL_AudioSpec wanted_spec, spec;
2240

    
2241
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2242
        return -1;
2243
    avctx = ic->streams[stream_index]->codec;
2244

    
2245
    /* prepare audio output */
2246
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2247
        if (avctx->channels > 0) {
2248
            avctx->request_channels = FFMIN(2, avctx->channels);
2249
        } else {
2250
            avctx->request_channels = 2;
2251
        }
2252
    }
2253

    
2254
    codec = avcodec_find_decoder(avctx->codec_id);
2255
    avctx->debug_mv = debug_mv;
2256
    avctx->debug = debug;
2257
    avctx->workaround_bugs = workaround_bugs;
2258
    avctx->lowres = lowres;
2259
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2260
    avctx->idct_algo= idct;
2261
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2262
    avctx->skip_frame= skip_frame;
2263
    avctx->skip_idct= skip_idct;
2264
    avctx->skip_loop_filter= skip_loop_filter;
2265
    avctx->error_recognition= error_recognition;
2266
    avctx->error_concealment= error_concealment;
2267
    avctx->thread_count= thread_count;
2268

    
2269
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2270

    
2271
    if (!codec ||
2272
        avcodec_open(avctx, codec) < 0)
2273
        return -1;
2274

    
2275
    /* prepare audio output */
2276
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2277
        wanted_spec.freq = avctx->sample_rate;
2278
        wanted_spec.format = AUDIO_S16SYS;
2279
        wanted_spec.channels = avctx->channels;
2280
        wanted_spec.silence = 0;
2281
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2282
        wanted_spec.callback = sdl_audio_callback;
2283
        wanted_spec.userdata = is;
2284
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2285
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2286
            return -1;
2287
        }
2288
        is->audio_hw_buf_size = spec.size;
2289
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2290
    }
2291

    
2292
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2293
    switch(avctx->codec_type) {
2294
    case AVMEDIA_TYPE_AUDIO:
2295
        is->audio_stream = stream_index;
2296
        is->audio_st = ic->streams[stream_index];
2297
        is->audio_buf_size = 0;
2298
        is->audio_buf_index = 0;
2299

    
2300
        /* init averaging filter */
2301
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2302
        is->audio_diff_avg_count = 0;
2303
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
2305
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2306

    
2307
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2308
        packet_queue_init(&is->audioq);
2309
        SDL_PauseAudio(0);
2310
        break;
2311
    case AVMEDIA_TYPE_VIDEO:
2312
        is->video_stream = stream_index;
2313
        is->video_st = ic->streams[stream_index];
2314

    
2315
//        is->video_current_pts_time = av_gettime();
2316

    
2317
        packet_queue_init(&is->videoq);
2318
        is->video_tid = SDL_CreateThread(video_thread, is);
2319
        break;
2320
    case AVMEDIA_TYPE_SUBTITLE:
2321
        is->subtitle_stream = stream_index;
2322
        is->subtitle_st = ic->streams[stream_index];
2323
        packet_queue_init(&is->subtitleq);
2324

    
2325
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2326
        break;
2327
    default:
2328
        break;
2329
    }
2330
    return 0;
2331
}
2332

    
2333
static void stream_component_close(VideoState *is, int stream_index)
2334
{
2335
    AVFormatContext *ic = is->ic;
2336
    AVCodecContext *avctx;
2337

    
2338
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2339
        return;
2340
    avctx = ic->streams[stream_index]->codec;
2341

    
2342
    switch(avctx->codec_type) {
2343
    case AVMEDIA_TYPE_AUDIO:
2344
        packet_queue_abort(&is->audioq);
2345

    
2346
        SDL_CloseAudio();
2347

    
2348
        packet_queue_end(&is->audioq);
2349
        if (is->reformat_ctx)
2350
            av_audio_convert_free(is->reformat_ctx);
2351
        is->reformat_ctx = NULL;
2352
        break;
2353
    case AVMEDIA_TYPE_VIDEO:
2354
        packet_queue_abort(&is->videoq);
2355

    
2356
        /* note: we also signal this mutex to make sure we deblock the
2357
           video thread in all cases */
2358
        SDL_LockMutex(is->pictq_mutex);
2359
        SDL_CondSignal(is->pictq_cond);
2360
        SDL_UnlockMutex(is->pictq_mutex);
2361

    
2362
        SDL_WaitThread(is->video_tid, NULL);
2363

    
2364
        packet_queue_end(&is->videoq);
2365
        break;
2366
    case AVMEDIA_TYPE_SUBTITLE:
2367
        packet_queue_abort(&is->subtitleq);
2368

    
2369
        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
2371
        SDL_LockMutex(is->subpq_mutex);
2372
        is->subtitle_stream_changed = 1;
2373

    
2374
        SDL_CondSignal(is->subpq_cond);
2375
        SDL_UnlockMutex(is->subpq_mutex);
2376

    
2377
        SDL_WaitThread(is->subtitle_tid, NULL);
2378

    
2379
        packet_queue_end(&is->subtitleq);
2380
        break;
2381
    default:
2382
        break;
2383
    }
2384

    
2385
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2386
    avcodec_close(avctx);
2387
    switch(avctx->codec_type) {
2388
    case AVMEDIA_TYPE_AUDIO:
2389
        is->audio_st = NULL;
2390
        is->audio_stream = -1;
2391
        break;
2392
    case AVMEDIA_TYPE_VIDEO:
2393
        is->video_st = NULL;
2394
        is->video_stream = -1;
2395
        break;
2396
    case AVMEDIA_TYPE_SUBTITLE:
2397
        is->subtitle_st = NULL;
2398
        is->subtitle_stream = -1;
2399
        break;
2400
    default:
2401
        break;
2402
    }
2403
}
2404

    
2405
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
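/* The demuxing thread below owns the AVFormatContext: it opens the input,
   picks the best audio/video/subtitle streams, then loops reading packets
   into the per-stream queues while servicing seek requests, EOF looping and
   the -autoexit option. */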
2413

    
2414
/* this thread gets the stream from the disk or the network */
2415
static int decode_thread(void *arg)
2416
{
2417
    VideoState *is = arg;
2418
    AVFormatContext *ic;
2419
    int err, i, ret;
2420
    int st_index[AVMEDIA_TYPE_NB];
2421
    AVPacket pkt1, *pkt = &pkt1;
2422
    AVFormatParameters params, *ap = &params;
2423
    int eof=0;
2424
    int pkt_in_play_range = 0;
2425

    
2426
    ic = avformat_alloc_context();
2427

    
2428
    memset(st_index, -1, sizeof(st_index));
2429
    is->video_stream = -1;
2430
    is->audio_stream = -1;
2431
    is->subtitle_stream = -1;
2432

    
2433
    global_video_state = is;
2434
    url_set_interrupt_cb(decode_interrupt_cb);
2435

    
2436
    memset(ap, 0, sizeof(*ap));
2437

    
2438
    ap->prealloced_context = 1;
2439
    ap->width = frame_width;
2440
    ap->height= frame_height;
2441
    ap->time_base= (AVRational){1, 25};
2442
    ap->pix_fmt = frame_pix_fmt;
2443

    
2444
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2445

    
2446
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2447
    if (err < 0) {
2448
        print_error(is->filename, err);
2449
        ret = -1;
2450
        goto fail;
2451
    }
2452
    is->ic = ic;
2453

    
2454
    if(genpts)
2455
        ic->flags |= AVFMT_FLAG_GENPTS;
2456

    
2457
    err = av_find_stream_info(ic);
2458
    if (err < 0) {
2459
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2460
        ret = -1;
2461
        goto fail;
2462
    }
2463
    if(ic->pb)
2464
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2465

    
2466
    if(seek_by_bytes<0)
2467
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2468

    
2469
    /* if seeking was requested, execute it */
2470
    if (start_time != AV_NOPTS_VALUE) {
2471
        int64_t timestamp;
2472

    
2473
        timestamp = start_time;
2474
        /* add the stream start time */
2475
        if (ic->start_time != AV_NOPTS_VALUE)
2476
            timestamp += ic->start_time;
2477
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2478
        if (ret < 0) {
2479
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2480
                    is->filename, (double)timestamp / AV_TIME_BASE);
2481
        }
2482
    }
2483

    
2484
    for (i = 0; i < ic->nb_streams; i++)
2485
        ic->streams[i]->discard = AVDISCARD_ALL;
2486
    if (!video_disable)
2487
        st_index[AVMEDIA_TYPE_VIDEO] =
2488
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2489
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2490
    if (!audio_disable)
2491
        st_index[AVMEDIA_TYPE_AUDIO] =
2492
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2493
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
2494
                                st_index[AVMEDIA_TYPE_VIDEO],
2495
                                NULL, 0);
2496
    if (!video_disable)
2497
        st_index[AVMEDIA_TYPE_SUBTITLE] =
2498
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2499
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2500
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2501
                                 st_index[AVMEDIA_TYPE_AUDIO] :
2502
                                 st_index[AVMEDIA_TYPE_VIDEO]),
2503
                                NULL, 0);
2504
    if (show_status) {
2505
        dump_format(ic, 0, is->filename, 0);
2506
    }
2507

    
2508
    /* open the streams */
2509
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2510
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2511
    }
2512

    
2513
    ret=-1;
2514
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2515
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2516
    }
2517
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2518
    if(ret<0) {
2519
        if (!display_disable)
2520
            is->show_audio = 2;
2521
    }
2522

    
2523
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2524
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2525
    }
2526

    
2527
    if (is->video_stream < 0 && is->audio_stream < 0) {
2528
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2529
        ret = -1;
2530
        goto fail;
2531
    }
2532

    
2533
    for(;;) {
2534
        if (is->abort_request)
2535
            break;
2536
        if (is->paused != is->last_paused) {
2537
            is->last_paused = is->paused;
2538
            if (is->paused)
2539
                is->read_pause_return= av_read_pause(ic);
2540
            else
2541
                av_read_play(ic);
2542
        }
2543
#if CONFIG_RTSP_DEMUXER
2544
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2545
            /* wait 10 ms to avoid trying to get another packet */
2546
            /* XXX: horrible */
2547
            SDL_Delay(10);
2548
            continue;
2549
        }
2550
#endif
2551
        if (is->seek_req) {
2552
            int64_t seek_target= is->seek_pos;
2553
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2554
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2555
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2556
//      of the seek_pos/seek_rel variables
2557

    
2558
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2559
            if (ret < 0) {
2560
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2561
            }else{
2562
                if (is->audio_stream >= 0) {
2563
                    packet_queue_flush(&is->audioq);
2564
                    packet_queue_put(&is->audioq, &flush_pkt);
2565
                }
2566
                if (is->subtitle_stream >= 0) {
2567
                    packet_queue_flush(&is->subtitleq);
2568
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2569
                }
2570
                if (is->video_stream >= 0) {
2571
                    packet_queue_flush(&is->videoq);
2572
                    packet_queue_put(&is->videoq, &flush_pkt);
2573
                }
2574
            }
2575
            is->seek_req = 0;
2576
            eof= 0;
2577
        }
2578

    
2579
        /* if the queues are full, no need to read more */
2580
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2581
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2582
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2583
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2584
            /* wait 10 ms */
2585
            SDL_Delay(10);
2586
            continue;
2587
        }
2588
        if(eof) {
2589
            if(is->video_stream >= 0){
2590
                av_init_packet(pkt);
2591
                pkt->data=NULL;
2592
                pkt->size=0;
2593
                pkt->stream_index= is->video_stream;
2594
                packet_queue_put(&is->videoq, pkt);
2595
            }
2596
            SDL_Delay(10);
2597
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2598
                if(loop!=1 && (!loop || --loop)){
2599
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2600
                }else if(autoexit){
2601
                    ret=AVERROR_EOF;
2602
                    goto fail;
2603
                }
2604
            }
2605
            continue;
2606
        }
2607
        ret = av_read_frame(ic, pkt);
2608
        if (ret < 0) {
2609
            if (ret == AVERROR_EOF || url_feof(ic->pb))
2610
                eof=1;
2611
            if (url_ferror(ic->pb))
2612
                break;
2613
            SDL_Delay(100); /* wait for user event */
2614
            continue;
2615
        }
2616
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2617
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2618
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2619
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2620
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2621
                <= ((double)duration/1000000);
2622
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2623
            packet_queue_put(&is->audioq, pkt);
2624
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2625
            packet_queue_put(&is->videoq, pkt);
2626
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2627
            packet_queue_put(&is->subtitleq, pkt);
2628
        } else {
2629
            av_free_packet(pkt);
2630
        }
2631
    }
2632
    /* wait until the end */
2633
    while (!is->abort_request) {
2634
        SDL_Delay(100);
2635
    }
2636

    
2637
    ret = 0;
2638
 fail:
2639
    /* disable interrupting */
2640
    global_video_state = NULL;
2641

    
2642
    /* close each stream */
2643
    if (is->audio_stream >= 0)
2644
        stream_component_close(is, is->audio_stream);
2645
    if (is->video_stream >= 0)
2646
        stream_component_close(is, is->video_stream);
2647
    if (is->subtitle_stream >= 0)
2648
        stream_component_close(is, is->subtitle_stream);
2649
    if (is->ic) {
2650
        av_close_input_file(is->ic);
2651
        is->ic = NULL; /* safety */
2652
    }
2653
    url_set_interrupt_cb(NULL);
2654

    
2655
    if (ret != 0) {
2656
        SDL_Event event;
2657

    
2658
        event.type = FF_QUIT_EVENT;
2659
        event.user.data1 = is;
2660
        SDL_PushEvent(&event);
2661
    }
2662
    return 0;
2663
}
2664

    
2665
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
2692

    
2693
static void stream_cycle_channel(VideoState *is, int codec_type)
2694
{
2695
    AVFormatContext *ic = is->ic;
2696
    int start_index, stream_index;
2697
    AVStream *st;
2698

    
2699
    if (codec_type == AVMEDIA_TYPE_VIDEO)
2700
        start_index = is->video_stream;
2701
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
2702
        start_index = is->audio_stream;
2703
    else
2704
        start_index = is->subtitle_stream;
2705
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2706
        return;
2707
    stream_index = start_index;
2708
    for(;;) {
2709
        if (++stream_index >= is->ic->nb_streams)
2710
        {
2711
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2712
            {
2713
                stream_index = -1;
2714
                goto the_end;
2715
            } else
2716
                stream_index = 0;
2717
        }
2718
        if (stream_index == start_index)
2719
            return;
2720
        st = ic->streams[stream_index];
2721
        if (st->codec->codec_type == codec_type) {
2722
            /* check that parameters are OK */
2723
            switch(codec_type) {
2724
            case AVMEDIA_TYPE_AUDIO:
2725
                if (st->codec->sample_rate != 0 &&
2726
                    st->codec->channels != 0)
2727
                    goto the_end;
2728
                break;
2729
            case AVMEDIA_TYPE_VIDEO:
2730
            case AVMEDIA_TYPE_SUBTITLE:
2731
                goto the_end;
2732
            default:
2733
                break;
2734
            }
2735
        }
2736
    }
2737
 the_end:
2738
    stream_component_close(is, start_index);
2739
    stream_component_open(is, stream_index);
2740
}
2741

    
2742

    
2743
static void toggle_full_screen(void)
2744
{
2745
    is_full_screen = !is_full_screen;
2746
    if (!fs_screen_width) {
2747
        /* use default SDL method */
2748
//        SDL_WM_ToggleFullScreen(screen);
2749
    }
2750
    video_open(cur_stream);
2751
}
2752

    
2753
static void toggle_pause(void)
2754
{
2755
    if (cur_stream)
2756
        stream_pause(cur_stream);
2757
    step = 0;
2758
}
2759

    
2760
static void step_to_next_frame(void)
2761
{
2762
    if (cur_stream) {
2763
        /* if the stream is paused unpause it, then step */
2764
        if (cur_stream->paused)
2765
            stream_pause(cur_stream);
2766
    }
2767
    step = 1;
2768
}
2769

    
2770
static void toggle_audio_display(void)
2771
{
2772
    if (cur_stream) {
2773
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2774
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2775
        fill_rectangle(screen,
2776
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2777
                    bgcolor);
2778
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2779
    }
2780
}
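/* Keys handled below: q/ESC quit, f fullscreen, p/SPACE pause, s frame step,
   a/v/t cycle audio/video/subtitle streams, w toggle the audio visualization,
   arrow keys seek by 10 s / 1 min, mouse click seeks to a fraction of the
   file.  When seeking by bytes the time increment is converted using the
   container bit rate, falling back to 180000 bytes per second if unknown. */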
2781

    
2782
/* handle an event sent by the GUI */
2783
static void event_loop(void)
2784
{
2785
    SDL_Event event;
2786
    double incr, pos, frac;
2787

    
2788
    for(;;) {
2789
        double x;
2790
        SDL_WaitEvent(&event);
2791
        switch(event.type) {
2792
        case SDL_KEYDOWN:
2793
            if (exit_on_keydown) {
2794
                do_exit();
2795
                break;
2796
            }
2797
            switch(event.key.keysym.sym) {
2798
            case SDLK_ESCAPE:
2799
            case SDLK_q:
2800
                do_exit();
2801
                break;
2802
            case SDLK_f:
2803
                toggle_full_screen();
2804
                break;
2805
            case SDLK_p:
2806
            case SDLK_SPACE:
2807
                toggle_pause();
2808
                break;
2809
            case SDLK_s: //S: Step to next frame
2810
                step_to_next_frame();
2811
                break;
2812
            case SDLK_a:
2813
                if (cur_stream)
2814
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2815
                break;
2816
            case SDLK_v:
2817
                if (cur_stream)
2818
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2819
                break;
2820
            case SDLK_t:
2821
                if (cur_stream)
2822
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2823
                break;
2824
            case SDLK_w:
2825
                toggle_audio_display();
2826
                break;
2827
            case SDLK_LEFT:
2828
                incr = -10.0;
2829
                goto do_seek;
2830
            case SDLK_RIGHT:
2831
                incr = 10.0;
2832
                goto do_seek;
2833
            case SDLK_UP:
2834
                incr = 60.0;
2835
                goto do_seek;
2836
            case SDLK_DOWN:
2837
                incr = -60.0;
2838
            do_seek:
2839
                if (cur_stream) {
2840
                    if (seek_by_bytes) {
2841
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2842
                            pos= cur_stream->video_current_pos;
2843
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2844
                            pos= cur_stream->audio_pkt.pos;
2845
                        }else
2846
                            pos = url_ftell(cur_stream->ic->pb);
2847
                        if (cur_stream->ic->bit_rate)
2848
                            incr *= cur_stream->ic->bit_rate / 8.0;
2849
                        else
2850
                            incr *= 180000.0;
2851
                        pos += incr;
2852
                        stream_seek(cur_stream, pos, incr, 1);
2853
                    } else {
2854
                        pos = get_master_clock(cur_stream);
2855
                        pos += incr;
2856
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2857
                    }
2858
                }
2859
                break;
2860
            default:
2861
                break;
2862
            }
2863
            break;
2864
        case SDL_MOUSEBUTTONDOWN:
2865
            if (exit_on_mousedown) {
2866
                do_exit();
2867
                break;
2868
            }
2869
        case SDL_MOUSEMOTION:
2870
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2871
                x= event.button.x;
2872
            }else{
2873
                if(event.motion.state != SDL_PRESSED)
2874
                    break;
2875
                x= event.motion.x;
2876
            }
2877
            if (cur_stream) {
2878
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2879
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2880
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2881
                }else{
2882
                    int64_t ts;
2883
                    int ns, hh, mm, ss;
2884
                    int tns, thh, tmm, tss;
2885
                    tns = cur_stream->ic->duration/1000000LL;
2886
                    thh = tns/3600;
2887
                    tmm = (tns%3600)/60;
2888
                    tss = (tns%60);
2889
                    frac = x/cur_stream->width;
2890
                    ns = frac*tns;
2891
                    hh = ns/3600;
2892
                    mm = (ns%3600)/60;
2893
                    ss = (ns%60);
2894
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2895
                            hh, mm, ss, thh, tmm, tss);
2896
                    ts = frac*cur_stream->ic->duration;
2897
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2898
                        ts += cur_stream->ic->start_time;
2899
                    stream_seek(cur_stream, ts, 0, 0);
2900
                }
2901
            }
2902
            break;
2903
        case SDL_VIDEORESIZE:
2904
            if (cur_stream) {
2905
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2906
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2907
                screen_width = cur_stream->width = event.resize.w;
2908
                screen_height= cur_stream->height= event.resize.h;
2909
            }
2910
            break;
2911
        case SDL_QUIT:
2912
        case FF_QUIT_EVENT:
2913
            do_exit();
2914
            break;
2915
        case FF_ALLOC_EVENT:
2916
            video_open(event.user.data1);
2917
            alloc_picture(event.user.data1);
2918
            break;
2919
        case FF_REFRESH_EVENT:
2920
            video_refresh_timer(event.user.data1);
2921
            cur_stream->refresh=0;
2922
            break;
2923
        default:
2924
            break;
2925
        }
2926
    }
2927
}
2928

    
2929
static void opt_frame_size(const char *arg)
2930
{
2931
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2932
        fprintf(stderr, "Incorrect frame size\n");
2933
        exit(1);
2934
    }
2935
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2936
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2937
        exit(1);
2938
    }
2939
}
2940

    
2941
static int opt_width(const char *opt, const char *arg)
2942
{
2943
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2944
    return 0;
2945
}
2946

    
2947
static int opt_height(const char *opt, const char *arg)
2948
{
2949
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2950
    return 0;
2951
}
2952

    
2953
static void opt_format(const char *arg)
2954
{
2955
    file_iformat = av_find_input_format(arg);
2956
    if (!file_iformat) {
2957
        fprintf(stderr, "Unknown input format: %s\n", arg);
2958
        exit(1);
2959
    }
2960
}
2961

    
2962
static void opt_frame_pix_fmt(const char *arg)
2963
{
2964
    frame_pix_fmt = av_get_pix_fmt(arg);
2965
}
2966

    
2967
static int opt_sync(const char *opt, const char *arg)
2968
{
2969
    if (!strcmp(arg, "audio"))
2970
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2971
    else if (!strcmp(arg, "video"))
2972
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2973
    else if (!strcmp(arg, "ext"))
2974
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2975
    else {
2976
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2977
        exit(1);
2978
    }
2979
    return 0;
2980
}
2981

    
2982
static int opt_seek(const char *opt, const char *arg)
2983
{
2984
    start_time = parse_time_or_die(opt, arg, 1);
2985
    return 0;
2986
}
2987

    
2988
static int opt_duration(const char *opt, const char *arg)
2989
{
2990
    duration = parse_time_or_die(opt, arg, 1);
2991
    return 0;
2992
}
2993

    
2994
static int opt_debug(const char *opt, const char *arg)
2995
{
2996
    av_log_set_level(99);
2997
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2998
    return 0;
2999
}
3000

    
3001
static int opt_vismv(const char *opt, const char *arg)
3002
{
3003
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3004
    return 0;
3005
}
3006

    
3007
static int opt_thread_count(const char *opt, const char *arg)
3008
{
3009
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3010
#if !HAVE_THREADS
3011
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3012
#endif
3013
    return 0;
3014
}
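/* Command line option table: each entry maps a flag either to one of the
   global variables above or to an opt_* handler.  Options shared with the
   other FFmpeg tools come from cmdutils_common_opts.h. */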
3015

    
3016
static const OptionDef options[] = {
3017
#include "cmdutils_common_opts.h"
3018
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3019
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3020
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3021
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3022
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3023
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3024
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3025
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3026
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3027
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3028
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3029
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3030
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3031
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3032
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3033
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3034
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3035
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3036
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3037
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3038
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3039
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3040
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3041
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3042
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3043
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3044
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3045
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3046
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3047
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3048
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3049
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3050
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3051
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3052
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3053
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3054
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3055
#if CONFIG_AVFILTER
3056
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3057
#endif
3058
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3059
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3060
    { NULL, },
3061
};
3062

    
3063
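/*
 * Example invocations exercising the options above (the filenames are
 * placeholders, not part of the source):
 *   ffplay -an input.mkv             play without audio
 *   ffplay -ss 30 -t 10 input.mkv    seek to 30 s, play 10 s
 *   ffplay -sync video input.mkv     slave the clock to the video stream
 *   ffplay -nodisp input.mp3         audio-only playback, no window
 */
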
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

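    /* set up the default AVCodec/AVFormat/swscale option contexts that
       opt_default() and the generic option handling in cmdutils rely on */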
    init_opts();

    show_banner();

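    /* parse the command line; any non-option argument is handed to
       opt_input_file() and becomes the input filename */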
    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

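    /* -nodisp implies -vn: with the display disabled there is nothing to
       render decoded video into */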
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init(flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

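    /* remember the desktop size so that toggling full screen can pick a
       matching mode */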
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

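    /* ignore SDL event types that event_loop() does not handle */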
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

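    /* flush_pkt is a sentinel packet: queueing it on a seek tells the
       decoder threads to flush their codec buffers before continuing */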
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)"FLUSH";

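    /* open the input; stream_open() also starts the demuxing/decoding threads */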
    cur_stream = stream_open(input_filename, file_iformat);

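    /* hand control to the SDL event loop (keyboard, mouse and refresh events) */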
    event_loop();

    /* not reached: event_loop() never returns */

    return 0;
}