Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 043d2ff2

History | View | Annotate | Download (98.9 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavcore/imgutils.h"
32
#include "libavcore/parseutils.h"
33
#include "libavcore/samplefmt.h"
34
#include "libavformat/avformat.h"
35
#include "libavdevice/avdevice.h"
36
#include "libswscale/swscale.h"
37
#include "libavcodec/audioconvert.h"
38
#include "libavcodec/opt.h"
39
#include "libavcodec/avfft.h"
40

    
41
#if CONFIG_AVFILTER
42
# include "libavfilter/avfilter.h"
43
# include "libavfilter/avfiltergraph.h"
44
#endif
45

    
46
#include "cmdutils.h"
47

    
48
#include <SDL.h>
49
#include <SDL_thread.h>
50

    
51
#ifdef __MINGW32__
52
#undef main /* We don't want SDL to override our main() */
53
#endif
54

    
55
#include <unistd.h>
56
#include <assert.h>
57

    
58
const char program_name[] = "FFplay";
59
const int program_birth_year = 2003;
60

    
61
//#define DEBUG
62
//#define DEBUG_SYNC
63

    
64
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66
#define MIN_FRAMES 5
67

    
68
/* SDL audio buffer size, in samples. Should be small to have precise
69
   A/V sync as SDL does not have hardware buffer fullness info. */
70
#define SDL_AUDIO_BUFFER_SIZE 1024
71

    
72
/* no AV sync correction is done if below the AV sync threshold */
73
#define AV_SYNC_THRESHOLD 0.01
74
/* no AV correction is done if too big error */
75
#define AV_NOSYNC_THRESHOLD 10.0
76

    
77
#define FRAME_SKIP_FACTOR 0.05
78

    
79
/* maximum audio speed change to get correct sync */
80
#define SAMPLE_CORRECTION_PERCENT_MAX 10
81

    
82
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83
#define AUDIO_DIFF_AVG_NB   20
84

    
85
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
86
#define SAMPLE_ARRAY_SIZE (2*65536)
87

    
88
static int sws_flags = SWS_BICUBIC;
89

    
90
/* Thread-safe FIFO of demuxed packets shared between the read thread
   and the decoder threads. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of the singly linked list */
    int nb_packets;     /* number of packets currently queued */
    int size;           /* total byte size of queued packets (incl. list nodes) */
    int abort_request;  /* when set, blocked readers return immediately */
    SDL_mutex *mutex;   /* protects all fields above */
    SDL_cond *cond;     /* signalled on put and on abort */
} PacketQueue;
98

    
99
#define VIDEO_PICTURE_QUEUE_SIZE 2
100
#define SUBPICTURE_QUEUE_SIZE 4
101

    
102
/* One decoded video frame queued for display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;     /* true once bmp has been allocated */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<reference to the filtered frame buffer
#endif
} VideoPicture;
115

    
116
/* One decoded subtitle queued for blending onto the video. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;  /* decoded subtitle rectangles; freed with avsubtitle_free() */
} SubPicture;
120

    
121
/* Master clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
126

    
127
/* Global state for one open media file: demuxer context, per-stream
   decoding state, packet/picture queues, A/V sync clocks and display
   bookkeeping.  Shared by the read, decode, refresh and SDL threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demux (read) thread */
    SDL_Thread *video_tid;    /* video decode thread */
    SDL_Thread *refresh_tid;  /* display refresh thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* tells all threads to quit */
    int paused;
    int last_paused;
    int seek_req;             /* a seek was requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the selected audio stream */

    int av_sync_type;         /* one of AV_SYNC_* above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples for the visualizer */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;  /* FFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;           /* current column of the scrolling spectrum */

    SDL_Thread *subtitle_tid;  /* subtitle decode thread */
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* current display window geometry */

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
219

    
220
static void show_help(void);
221
static int audio_write_get_buf_size(VideoState *is);
222

    
223
/* options specified by the user */
224
static AVInputFormat *file_iformat;
225
static const char *input_filename;
226
static const char *window_title;
227
static int fs_screen_width;
228
static int fs_screen_height;
229
static int screen_width = 0;
230
static int screen_height = 0;
231
static int frame_width = 0;
232
static int frame_height = 0;
233
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234
static int audio_disable;
235
static int video_disable;
236
static int wanted_stream[AVMEDIA_TYPE_NB]={
237
    [AVMEDIA_TYPE_AUDIO]=-1,
238
    [AVMEDIA_TYPE_VIDEO]=-1,
239
    [AVMEDIA_TYPE_SUBTITLE]=-1,
240
};
241
static int seek_by_bytes=-1;
242
static int display_disable;
243
static int show_status = 1;
244
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245
static int64_t start_time = AV_NOPTS_VALUE;
246
static int64_t duration = AV_NOPTS_VALUE;
247
static int debug = 0;
248
static int debug_mv = 0;
249
static int step = 0;
250
static int thread_count = 1;
251
static int workaround_bugs = 1;
252
static int fast = 0;
253
static int genpts = 0;
254
static int lowres = 0;
255
static int idct = FF_IDCT_AUTO;
256
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259
static int error_recognition = FF_ER_CAREFUL;
260
static int error_concealment = 3;
261
static int decoder_reorder_pts= -1;
262
static int autoexit;
263
static int exit_on_keydown;
264
static int exit_on_mousedown;
265
static int loop=1;
266
static int framedrop=1;
267

    
268
static int rdftspeed=20;
269
#if CONFIG_AVFILTER
270
static char *vfilters = NULL;
271
#endif
272

    
273
/* current context */
274
static int is_full_screen;
275
static VideoState *cur_stream;
276
static int64_t audio_callback_time;
277

    
278
static AVPacket flush_pkt;
279

    
280
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
281
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283

    
284
static SDL_Surface *screen;
285

    
286
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287

    
288
/* packet queue handling */
289
/* Initialize a packet queue: zero every field, create the lock and the
 * condition variable, then prime the queue with the global flush packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->cond  = SDL_CreateCond();
    q->mutex = SDL_CreateMutex();
    packet_queue_put(q, &flush_pkt);
}
296

    
297
/* Discard every packet still in the queue and reset the counters.
 * The queue itself (mutex/cond) remains usable afterwards. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *node, *next;

    SDL_LockMutex(q->mutex);
    node = q->first_pkt;
    while (node != NULL) {
        next = node->next;
        av_free_packet(&node->pkt);
        av_freep(&node);
        node = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
313

    
314
/* Tear a queue down for good: drop any remaining packets, then destroy
 * the synchronization primitives created by packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
320

    
321
/* Append a packet to the queue (thread safe).
 * The packet payload is duplicated unless it is the special flush packet,
 * so the queue always owns its data.
 * Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *node;

    /* make sure the packet owns its own buffer before it is queued */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    node = av_malloc(sizeof(AVPacketList));
    if (!node)
        return -1;
    node->pkt  = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = node;
    else
        q->first_pkt = node;   /* queue was empty */
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size + sizeof(*node);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);   /* wake a reader blocked in packet_queue_get() */

    SDL_UnlockMutex(q->mutex);
    return 0;
}
352

    
353
/* Request abort: set the flag and wake any reader blocked in
 * packet_queue_get() so it can return with an error. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
363

    
364
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366
{
367
    AVPacketList *pkt1;
368
    int ret;
369

    
370
    SDL_LockMutex(q->mutex);
371

    
372
    for(;;) {
373
        if (q->abort_request) {
374
            ret = -1;
375
            break;
376
        }
377

    
378
        pkt1 = q->first_pkt;
379
        if (pkt1) {
380
            q->first_pkt = pkt1->next;
381
            if (!q->first_pkt)
382
                q->last_pkt = NULL;
383
            q->nb_packets--;
384
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
385
            *pkt = pkt1->pkt;
386
            av_free(pkt1);
387
            ret = 1;
388
            break;
389
        } else if (!block) {
390
            ret = 0;
391
            break;
392
        } else {
393
            SDL_CondWait(q->cond, q->mutex);
394
        }
395
    }
396
    SDL_UnlockMutex(q->mutex);
397
    return ret;
398
}
399

    
400
/* Fill the axis-aligned rectangle (x, y, w, h) on the surface with a
 * solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { .x = x, .y = y, .w = w, .h = h };
    SDL_FillRect(screen, &rect, color);
}
410

    
411
#if 0
412
/* draw only the border of a rectangle */
413
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414
{
415
    int w1, w2, h1, h2;
416

417
    /* fill the background */
418
    w1 = x;
419
    if (w1 < 0)
420
        w1 = 0;
421
    w2 = s->width - (x + w);
422
    if (w2 < 0)
423
        w2 = 0;
424
    h1 = y;
425
    if (h1 < 0)
426
        h1 = 0;
427
    h2 = s->height - (y + h);
428
    if (h2 < 0)
429
        h2 = 0;
430
    fill_rectangle(screen,
431
                   s->xleft, s->ytop,
432
                   w1, s->height,
433
                   color);
434
    fill_rectangle(screen,
435
                   s->xleft + s->width - w2, s->ytop,
436
                   w2, s->height,
437
                   color);
438
    fill_rectangle(screen,
439
                   s->xleft + w1, s->ytop,
440
                   s->width - w1 - w2, h1,
441
                   color);
442
    fill_rectangle(screen,
443
                   s->xleft + w1, s->ytop + s->height - h2,
444
                   s->width - w1 - w2, h2,
445
                   color);
446
}
447
#endif
448

    
449
#define ALPHA_BLEND(a, oldp, newp, s)\
450
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
451

    
452
#define RGBA_IN(r, g, b, a, s)\
453
{\
454
    unsigned int v = ((const uint32_t *)(s))[0];\
455
    a = (v >> 24) & 0xff;\
456
    r = (v >> 16) & 0xff;\
457
    g = (v >> 8) & 0xff;\
458
    b = v & 0xff;\
459
}
460

    
461
#define YUVA_IN(y, u, v, a, s, pal)\
462
{\
463
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
464
    a = (val >> 24) & 0xff;\
465
    y = (val >> 16) & 0xff;\
466
    u = (val >> 8) & 0xff;\
467
    v = val & 0xff;\
468
}
469

    
470
#define YUVA_OUT(d, y, u, v, a)\
471
{\
472
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
473
}
474

    
475

    
476
#define BPP 1
477

    
478
/* Alpha-blend one paletted subtitle rectangle onto a YUV420 picture.
 * Luma is blended per pixel; chroma is blended at half resolution, so each
 * chroma sample averages the u/v/alpha of the 2 (edge) or 4 (interior)
 * covered luma pixels — the accumulated u1/v1/a1 sums with the matching
 * ALPHA_BLEND shift (1 for 2 samples, 2 for 4) implement that average.
 *
 * Fix: in the trailing odd-height row, the pairwise loop blended chroma
 * from the single-sample u/v instead of the accumulated u1/v1, unlike
 * every other 2-sample branch in this function.  Now uses u1/v1. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clamp the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma shared with the row above, blend at weight 1 */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: process two rows at a time so each chroma sample covers
       up to 4 luma pixels */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* 4 luma samples contributed: shift 2 averages them */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* FIX: blend the accumulated chroma (u1/v1), not the last
               sample's u/v, consistent with the other 2-sample branches */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
677

    
678
/* Release the decoded subtitle data owned by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
682

    
683
/* Display the picture at the read index of the picture queue: blend any
   due subtitle into its overlay, compute a letterboxed destination
   rectangle preserving the source aspect ratio, and blit the overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;  /* unknown sample aspect: assume square pixels */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come)
           directly into the overlay's pixel planes */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): the [1]/[2] swap presumably maps the
                       overlay's plane order to AVPicture's Y,U,V order
                       (YV12 stores V before U) — confirm against SDL docs */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, rounding to even sizes */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
804

    
805
/* Mathematical modulo: for positive b the result is always in [0, b),
 * unlike C's % operator, whose result can be negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r < 0 ? r + b : r;
}
813

    
814
/* Draw the audio visualization: waveform (show_audio == 1) or scrolling
 * RDFT spectrum (otherwise).  The display start index is chosen so the
 * samples shown are the ones currently being played, by subtracting the
 * audio output latency from the sample ring-buffer write position.
 *
 * Fix: time_diff was declared int16_t, but it holds the microsecond
 * delta av_gettime() - audio_callback_time (int64_t values).  Any delay
 * over ~32 ms overflowed the 16-bit variable and corrupted the latency
 * estimate; it is now int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;  /* microseconds since the last audio callback */
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, for the spectrum display */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing to stabilize the waveform display */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window height changed: re-create the RDFT context */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);  /* parabolic window */
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
952

    
953
/* Open (or resize) the SDL output surface.
 * The window size is chosen, in priority order, from: the forced fullscreen
 * size, the forced window size, the filter-graph output link (or the codec
 * dimensions when built without libavfilter), else a 640x480 fallback.
 * Returns 0 on success (including "nothing to do"), -1 if SDL refused the
 * video mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* already displaying at the wanted size: keep the current surface */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* record the size SDL actually gave us (may differ from the request) */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1002

    
1003
/* display the current picture, if any */
1004
static void video_display(VideoState *is)
1005
{
1006
    if(!screen)
1007
        video_open(cur_stream);
1008
    if (is->audio_st && is->show_audio)
1009
        video_audio_display(is);
1010
    else if (is->video_st)
1011
        video_image_display(is);
1012
}
1013

    
1014
static int refresh_thread(void *opaque)
1015
{
1016
    VideoState *is= opaque;
1017
    while(!is->abort_request){
1018
        SDL_Event event;
1019
        event.type = FF_REFRESH_EVENT;
1020
        event.user.data1 = opaque;
1021
        if(!is->refresh){
1022
            is->refresh=1;
1023
            SDL_PushEvent(&event);
1024
        }
1025
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1026
    }
1027
    return 0;
1028
}
1029

    
1030
/* get the current audio clock value */
1031
static double get_audio_clock(VideoState *is)
1032
{
1033
    double pts;
1034
    int hw_buf_size, bytes_per_sec;
1035
    pts = is->audio_clock;
1036
    hw_buf_size = audio_write_get_buf_size(is);
1037
    bytes_per_sec = 0;
1038
    if (is->audio_st) {
1039
        bytes_per_sec = is->audio_st->codec->sample_rate *
1040
            2 * is->audio_st->codec->channels;
1041
    }
1042
    if (bytes_per_sec)
1043
        pts -= (double)hw_buf_size / bytes_per_sec;
1044
    return pts;
1045
}
1046

    
1047
/* get the current video clock value */
1048
static double get_video_clock(VideoState *is)
1049
{
1050
    if (is->paused) {
1051
        return is->video_current_pts;
1052
    } else {
1053
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1054
    }
1055
}
1056

    
1057
/* get the current external clock value */
1058
static double get_external_clock(VideoState *is)
1059
{
1060
    int64_t ti;
1061
    ti = av_gettime();
1062
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1063
}
1064

    
1065
/* get the current master clock value */
1066
static double get_master_clock(VideoState *is)
1067
{
1068
    double val;
1069

    
1070
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1071
        if (is->video_st)
1072
            val = get_video_clock(is);
1073
        else
1074
            val = get_audio_clock(is);
1075
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1076
        if (is->audio_st)
1077
            val = get_audio_clock(is);
1078
        else
1079
            val = get_video_clock(is);
1080
    } else {
1081
        val = get_external_clock(is);
1082
    }
1083
    return val;
1084
}
1085

    
1086
/* seek in the stream */
1087
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1088
{
1089
    if (!is->seek_req) {
1090
        is->seek_pos = pos;
1091
        is->seek_rel = rel;
1092
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1093
        if (seek_by_bytes)
1094
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1095
        is->seek_req = 1;
1096
    }
1097
}
1098

    
1099
/* Toggle the pause state.  On resume, advance frame_timer by the wall time
 * spent paused and recompute the video clock drift so get_video_clock()
 * stays continuous across the pause. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: add the paused duration to the frame timer */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(); resync the current pts
               to "now" before refreshing the drift below */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1111

    
1112
/* Compute the absolute time (seconds, av_gettime() scale) at which the
 * frame with pts frame_current_pts should be shown, advancing
 * is->frame_timer by the (possibly sync-adjusted) inter-frame delay.
 *
 * Fix vs. original: the DEBUG_SYNC trace referenced the undeclared
 * variable 'actual_delay' (a compile error whenever DEBUG_SYNC is
 * defined) and printed 'diff' which is uninitialized when video is the
 * master clock.  'diff' is now zero-initialized and the trace prints the
 * computed delay instead. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1152

    
1153
/* called to display each frame */
1154
static void video_refresh_timer(void *opaque)
1155
{
1156
    VideoState *is = opaque;
1157
    VideoPicture *vp;
1158

    
1159
    SubPicture *sp, *sp2;
1160

    
1161
    if (is->video_st) {
1162
retry:
1163
        if (is->pictq_size == 0) {
1164
            //nothing to do, no picture to display in the que
1165
        } else {
1166
            double time= av_gettime()/1000000.0;
1167
            double next_target;
1168
            /* dequeue the picture */
1169
            vp = &is->pictq[is->pictq_rindex];
1170

    
1171
            if(time < vp->target_clock)
1172
                return;
1173
            /* update current video pts */
1174
            is->video_current_pts = vp->pts;
1175
            is->video_current_pts_drift = is->video_current_pts - time;
1176
            is->video_current_pos = vp->pos;
1177
            if(is->pictq_size > 1){
1178
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1179
                assert(nextvp->target_clock >= vp->target_clock);
1180
                next_target= nextvp->target_clock;
1181
            }else{
1182
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1183
            }
1184
            if(framedrop && time > next_target){
1185
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1186
                if(is->pictq_size > 1 || time > next_target + 0.5){
1187
                    /* update queue size and signal for next picture */
1188
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1189
                        is->pictq_rindex = 0;
1190

    
1191
                    SDL_LockMutex(is->pictq_mutex);
1192
                    is->pictq_size--;
1193
                    SDL_CondSignal(is->pictq_cond);
1194
                    SDL_UnlockMutex(is->pictq_mutex);
1195
                    goto retry;
1196
                }
1197
            }
1198

    
1199
            if(is->subtitle_st) {
1200
                if (is->subtitle_stream_changed) {
1201
                    SDL_LockMutex(is->subpq_mutex);
1202

    
1203
                    while (is->subpq_size) {
1204
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1205

    
1206
                        /* update queue size and signal for next picture */
1207
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1208
                            is->subpq_rindex = 0;
1209

    
1210
                        is->subpq_size--;
1211
                    }
1212
                    is->subtitle_stream_changed = 0;
1213

    
1214
                    SDL_CondSignal(is->subpq_cond);
1215
                    SDL_UnlockMutex(is->subpq_mutex);
1216
                } else {
1217
                    if (is->subpq_size > 0) {
1218
                        sp = &is->subpq[is->subpq_rindex];
1219

    
1220
                        if (is->subpq_size > 1)
1221
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1222
                        else
1223
                            sp2 = NULL;
1224

    
1225
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1226
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1227
                        {
1228
                            free_subpicture(sp);
1229

    
1230
                            /* update queue size and signal for next picture */
1231
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1232
                                is->subpq_rindex = 0;
1233

    
1234
                            SDL_LockMutex(is->subpq_mutex);
1235
                            is->subpq_size--;
1236
                            SDL_CondSignal(is->subpq_cond);
1237
                            SDL_UnlockMutex(is->subpq_mutex);
1238
                        }
1239
                    }
1240
                }
1241
            }
1242

    
1243
            /* display picture */
1244
            if (!display_disable)
1245
                video_display(is);
1246

    
1247
            /* update queue size and signal for next picture */
1248
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1249
                is->pictq_rindex = 0;
1250

    
1251
            SDL_LockMutex(is->pictq_mutex);
1252
            is->pictq_size--;
1253
            SDL_CondSignal(is->pictq_cond);
1254
            SDL_UnlockMutex(is->pictq_mutex);
1255
        }
1256
    } else if (is->audio_st) {
1257
        /* draw the next audio frame */
1258

    
1259
        /* if only audio stream, then display the audio bars (better
1260
           than nothing, just to test the implementation */
1261

    
1262
        /* display picture */
1263
        if (!display_disable)
1264
            video_display(is);
1265
    }
1266
    if (show_status) {
1267
        static int64_t last_time;
1268
        int64_t cur_time;
1269
        int aqsize, vqsize, sqsize;
1270
        double av_diff;
1271

    
1272
        cur_time = av_gettime();
1273
        if (!last_time || (cur_time - last_time) >= 30000) {
1274
            aqsize = 0;
1275
            vqsize = 0;
1276
            sqsize = 0;
1277
            if (is->audio_st)
1278
                aqsize = is->audioq.size;
1279
            if (is->video_st)
1280
                vqsize = is->videoq.size;
1281
            if (is->subtitle_st)
1282
                sqsize = is->subtitleq.size;
1283
            av_diff = 0;
1284
            if (is->audio_st && is->video_st)
1285
                av_diff = get_audio_clock(is) - get_video_clock(is);
1286
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1287
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->video_st->codec->pts_correction_num_faulty_dts, is->video_st->codec->pts_correction_num_faulty_pts);
1288
            fflush(stdout);
1289
            last_time = cur_time;
1290
        }
1291
    }
1292
}
1293

    
1294
/* Stop the demuxer and refresh threads, free all queued pictures and the
 * synchronization primitives, then free the VideoState itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* the scaler context only exists in the non-avfilter build */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1327

    
1328
static void do_exit(void)
1329
{
1330
    if (cur_stream) {
1331
        stream_close(cur_stream);
1332
        cur_stream = NULL;
1333
    }
1334
    uninit_opts();
1335
#if CONFIG_AVFILTER
1336
    avfilter_uninit();
1337
#endif
1338
    if (show_status)
1339
        printf("\n");
1340
    SDL_Quit();
1341
    av_log(NULL, AV_LOG_QUIET, "");
1342
    exit(0);
1343
}
1344

    
1345
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Handler for FF_ALLOC_EVENT: (re)creates the SDL YUV overlay for the
 * current write slot, then signals pictq_cond so the decoder thread
 * waiting in queue_picture() can proceed. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph's output link */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* publish the allocation and wake the waiting decoder thread */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1388

    
1389
/**
 * Queue src_frame for display: wait for a free slot in the picture queue,
 * (re)allocate the SDL overlay from the main thread if the size changed,
 * convert/copy the frame into it and stamp its target display time.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted while waiting
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;   /* only consumed by the non-avfilter sws path below */
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full with no refresh pending: display is keeping up, so decay
       the frame-skip factor toward its minimum */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        /* adopt the filter buffer reference carried in the frame */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores V before U, hence the swapped plane
           indices relative to YUV420P */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1510

    
1511
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * and keep is->video_clock ticking for the next frame.
 *
 * @param pts1 the dts of the pkt / pts of the frame (0 when unknown,
 *             in which case the running video clock is used instead)
 * @return the result of queue_picture(): 0 on success, -1 on abort
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1540

    
1541
/* Pull the next packet from the video queue and decode it.
 * Returns 1 when a displayable frame was produced (and not dropped by the
 * frame-skip logic), 0 when the caller should retry with the next packet,
 * -1 when the queue was aborted.  *pts receives the frame timestamp in
 * stream time_base units (0 when unknown).
 *
 * Fix vs. original: the return value of avcodec_decode_video2() was stored
 * in a variable (len1) that was never read; the set-but-unused variable is
 * removed. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* a seek happened: reset decoder state and the timing machinery */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    avcodec_decode_video2(is->video_st->codec,
                          frame, &got_picture,
                          pkt);

    if (got_picture) {
        /* timestamp source selected by the -drp option */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame-skip accounting driven by queue_picture() */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1596

    
1597
#if CONFIG_AVFILTER
/* Private state of the "ffplay_input" source filter that feeds decoded
 * frames into the libavfilter graph. */
typedef struct {
    VideoState *is;    /* owning player state */
    AVFrame *frame;    /* scratch frame reused for every decode */
    int use_dr1;       /* non-zero when direct rendering callbacks are installed */
} FilterPriv;
1603

    
1604
/* get_buffer callback used for direct rendering (DR1): instead of letting
 * the decoder allocate a frame, hand it a libavfilter video buffer so the
 * decoded picture can enter the filter graph without a copy.
 * Returns 0 on success, -1 if no filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* over-allocate to codec-aligned dimensions plus the edge border the
       decoder may write into */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* point inside the edge border; NOTE(review): the horizontal
               offset is in bytes and does not scale with the pixel size —
               appears to assume <=1 byte/sample planes; confirm for other
               formats */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1652

    
1653
/* release_buffer callback for DR1: clear the frame's plane pointers and
 * drop the filter buffer reference that backed them. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1658

    
1659
/* reget_buffer callback for DR1: reuse the existing filter buffer if the
 * picture properties are unchanged, otherwise fall back to a fresh
 * readable get_buffer.  Returns 0 on success, negative on mismatch. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer attached yet: allocate one that must stay readable */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1679

    
1680
/* init callback of the ffplay_input filter.  opaque is the VideoState;
 * installs the DR1 buffer callbacks on the video decoder when the codec
 * supports them and allocates the scratch frame.  Returns 0 on success,
 * -1 when no VideoState was supplied.
 * NOTE(review): avcodec_alloc_frame() result is not checked here — an OOM
 * would surface later as a NULL priv->frame. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
1701

    
1702
/* uninit callback of the ffplay_input filter: free the scratch frame
 * allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1707

    
1708
/* request_frame callback of the ffplay_input filter: decode until a frame
 * is produced, wrap it in a buffer ref (zero-copy under DR1, copied
 * otherwise) and push it downstream.  Returns 0 on success, -1 on abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a frame (1) or fails (<0) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame data already lives in a filter buffer; just re-reference it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    /* NOTE(review): pkt.pos is read after av_free_packet(); that call only
       releases the payload, so pos should still be valid — verify against
       the libavcodec version in use */
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1740

    
1741
/* query_formats callback: the source filter offers exactly one pixel
 * format — whatever the video decoder outputs. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1751

    
1752
/* config_props callback: size and time base of the source filter's output
 * link are taken straight from the video decoder/stream. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *fp = link->src->priv;
    AVCodecContext *dec = fp->is->video_st->codec;

    link->time_base = fp->is->video_st->time_base;
    link->w = dec->width;
    link->h = dec->height;

    return 0;
}
1763

    
1764
/* Definition of the in-process source filter: no inputs, one video output
 * pad whose request_frame pulls frames from the decoder. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* pure source: empty input pad list */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1782

    
1783
/* Build the video filter graph: ffplay_input -> [user -vf chain] -> ffsink,
 * configure it and store the sink in is->out_video_filter.
 * Returns 0 on success or a negative AVERROR code.
 *
 * Fix vs. original: the av_malloc() results for the AVFilterInOut
 * descriptors were dereferenced without a NULL check; allocation failure
 * now returns AVERROR(ENOMEM) instead of crashing. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* propagate the global sws flags to auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
        if (!outputs || !inputs) {
            av_free(outputs);
            av_free(inputs);
            ret = AVERROR(ENOMEM);
            goto the_end;
        }

        /* "in" label = output of our source, "out" label = input of the sink */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user chain: connect source directly to sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1828

    
1829
#endif  /* CONFIG_AVFILTER */
1830

    
1831
/* Video decoding thread: pulls frames (either straight from the decoder or
 * through the libavfilter graph when CONFIG_AVFILTER is set), rescales their
 * timestamps into the stream time base, and hands them to output_picture2()
 * for queuing into the picture queue. Runs until the queue is aborted or an
 * error occurs. Always returns 0 (thread exit status is unused). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    /* 'vfilters' is the global user-supplied filter description */
    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* stash the buffer ref so the display side can release it */
            frame->opaque = picref;
        }

        /* the filter graph may use a different time base than the stream;
           rescale the pts so the rest of the player sees stream time base */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        /* ret < 0: abort/error; ret == 0: no frame produced this iteration */
        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode ('s' key): pause again after each frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1905

    
1906
/* Subtitle decoding thread: reads packets from the subtitle queue, decodes
 * them, converts each rect's palette from RGBA to YUVA (the blend code works
 * in YUV), and publishes the result into the subpicture ring buffer guarded
 * by subpq_mutex/subpq_cond. Returns 0 on exit. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;   /* NOTE(review): len1 is assigned but never checked */
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued on seek) means: drop decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait until there is a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means a bitmap (paletted) subtitle */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each palette entry from RGBA to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1980

    
1981
/* copy samples for viewing in editor window */
1982
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1983
{
1984
    int size, len, channels;
1985

    
1986
    channels = is->audio_st->codec->channels;
1987

    
1988
    size = samples_size / sizeof(short);
1989
    while (size > 0) {
1990
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1991
        if (len > size)
1992
            len = size;
1993
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1994
        samples += len;
1995
        is->sample_array_index += len;
1996
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1997
            is->sample_array_index = 0;
1998
        size -= len;
1999
    }
2000
}
2001

    
2002
/* return the new audio buffer size (samples can be added or deleted
2003
   to get better sync if video or external master clock) */
2004
static int synchronize_audio(VideoState *is, short *samples,
2005
                             int samples_size1, double pts)
2006
{
2007
    int n, samples_size;
2008
    double ref_clock;
2009

    
2010
    n = 2 * is->audio_st->codec->channels;
2011
    samples_size = samples_size1;
2012

    
2013
    /* if not master, then we try to remove or add samples to correct the clock */
2014
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
2015
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2016
        double diff, avg_diff;
2017
        int wanted_size, min_size, max_size, nb_samples;
2018

    
2019
        ref_clock = get_master_clock(is);
2020
        diff = get_audio_clock(is) - ref_clock;
2021

    
2022
        if (diff < AV_NOSYNC_THRESHOLD) {
2023
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2024
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2025
                /* not enough measures to have a correct estimate */
2026
                is->audio_diff_avg_count++;
2027
            } else {
2028
                /* estimate the A-V difference */
2029
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2030

    
2031
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2032
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2033
                    nb_samples = samples_size / n;
2034

    
2035
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2036
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2037
                    if (wanted_size < min_size)
2038
                        wanted_size = min_size;
2039
                    else if (wanted_size > max_size)
2040
                        wanted_size = max_size;
2041

    
2042
                    /* add or remove samples to correction the synchro */
2043
                    if (wanted_size < samples_size) {
2044
                        /* remove samples */
2045
                        samples_size = wanted_size;
2046
                    } else if (wanted_size > samples_size) {
2047
                        uint8_t *samples_end, *q;
2048
                        int nb;
2049

    
2050
                        /* add samples */
2051
                        nb = (samples_size - wanted_size);
2052
                        samples_end = (uint8_t *)samples + samples_size - n;
2053
                        q = samples_end + n;
2054
                        while (nb > 0) {
2055
                            memcpy(q, samples_end, n);
2056
                            q += n;
2057
                            nb -= n;
2058
                        }
2059
                        samples_size = wanted_size;
2060
                    }
2061
                }
2062
#if 0
2063
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2064
                       diff, avg_diff, samples_size - samples_size1,
2065
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2066
#endif
2067
            }
2068
        } else {
2069
            /* too big difference : may be initial PTS errors, so
2070
               reset A-V filter */
2071
            is->audio_diff_avg_count = 0;
2072
            is->audio_diff_cum = 0;
2073
        }
2074
    }
2075

    
2076
    return samples_size;
2077
}
2078

    
2079
/* decode one audio frame and returns its uncompressed size */
/* Pulls packets from the audio queue, decodes them into is->audio_buf1 and,
 * if the decoder's sample format is not S16, converts into is->audio_buf2.
 * Sets is->audio_buf to whichever buffer holds the playable S16 data, writes
 * the frame's presentation time to *pts_ptr, advances is->audio_clock, and
 * returns the data size in bytes. Returns -1 when paused or aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* packet that owns the data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the bytes the decoder consumed */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample-format converter when the decoder's
               output format changes; target is always S16 for SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (single interleaved plane) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued on seek) means: drop decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2181

    
2182
/* get the current audio output buffer size, in samples. With SDL, we
2183
   cannot have a precise information */
2184
static int audio_write_get_buf_size(VideoState *is)
2185
{
2186
    return is->audio_buf_size - is->audio_buf_index;
2187
}
2188

    
2189

    
2190
/* prepare a new audio buffer */
2191
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2192
{
2193
    VideoState *is = opaque;
2194
    int audio_size, len1;
2195
    double pts;
2196

    
2197
    audio_callback_time = av_gettime();
2198

    
2199
    while (len > 0) {
2200
        if (is->audio_buf_index >= is->audio_buf_size) {
2201
           audio_size = audio_decode_frame(is, &pts);
2202
           if (audio_size < 0) {
2203
                /* if error, just output silence */
2204
               is->audio_buf = is->audio_buf1;
2205
               is->audio_buf_size = 1024;
2206
               memset(is->audio_buf, 0, is->audio_buf_size);
2207
           } else {
2208
               if (is->show_audio)
2209
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2210
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2211
                                              pts);
2212
               is->audio_buf_size = audio_size;
2213
           }
2214
           is->audio_buf_index = 0;
2215
        }
2216
        len1 = is->audio_buf_size - is->audio_buf_index;
2217
        if (len1 > len)
2218
            len1 = len;
2219
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2220
        len -= len1;
2221
        stream += len1;
2222
        is->audio_buf_index += len1;
2223
    }
2224
}
2225

    
2226
/* open a given stream. Return 0 if OK */
/* Configures and opens the decoder for ic->streams[stream_index], applies
 * the global command-line decoding options, and — per media type — opens the
 * SDL audio device, starts the video decoding thread, or starts the subtitle
 * decoding thread. Returns 0 on success, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    /* downmix to stereo at most; must be set before avcodec_open() */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global command-line decoder options */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* the demux thread only keeps packets of opened streams */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* unpause the device; SDL starts invoking sdl_audio_callback */
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2325

    
2326
/* Shut down one stream: abort its packet queue, unblock and join the
 * decoding thread (or close the SDL audio device), release per-stream
 * resources, close the codec, and clear the VideoState fields. Counterpart
 * of stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback thread; no join needed for audio */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        /* tells the subtitle thread its wait condition is stale */
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop the demuxer from queuing packets for this stream */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2397

    
2398
/* since we have only one decoding thread, we can use a global
2399
   variable instead of a thread local variable */
2400
static VideoState *global_video_state;
2401

    
2402
static int decode_interrupt_cb(void)
2403
{
2404
    return (global_video_state && global_video_state->abort_request);
2405
}
2406

    
2407
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, selects and opens the best audio/video/
 * subtitle streams, then loops reading packets and dispatching them to the
 * per-stream packet queues, handling pause, seek requests, queue-full
 * back-pressure, EOF/looping, and the user-specified play range. On exit it
 * tears down all stream components and posts FF_QUIT_EVENT on error. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let the I/O layer poll our abort flag through the global */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* demuxer hints for raw formats (from the command line) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default seek mode: by bytes for formats with timestamp discontinuities */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything until a stream is explicitly opened */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is guarded by !video_disable, not a
       subtitle-specific flag — looks intentional (subs need video) but confirm */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio waveform/spectrum display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the (possibly network) demuxer */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* service a pending seek request from the UI thread */
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop all queued packets and tell decoders to flush */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue an empty packet so the video decoder drains its delay */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* everything consumed: loop from the start or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main loop we died so it can quit */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2657

    
2658
/* Allocate a VideoState for 'filename' and start the demuxing thread.
 * Returns NULL on allocation or thread-creation failure; 'iformat' may
 * force a specific input format (NULL = autodetect). */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* Fix: the original leaked the mutexes/condvars created above on
         * this error path; release them before freeing the state. */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2685

    
2686
/* Cycle to the next stream of the given media type, wrapping around.
 * For subtitles, advancing past the last stream selects "no subtitle"
 * (index -1); for audio/video a usable stream must be found or we keep
 * the current one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    switch (codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        start_index = is->video_stream;
        break;
    case AVMEDIA_TYPE_AUDIO:
        start_index = is->audio_stream;
        break;
    default:
        start_index = is->subtitle_stream;
        break;
    }
    /* nothing to cycle from (subtitles may legitimately start at -1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;

    stream_index = start_index;
    for (;;) {
        if (++stream_index >= is->ic->nb_streams) {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
                stream_index = -1;      /* wrap to "subtitles off" */
                goto the_end;
            }
            stream_index = 0;           /* wrap to the first stream */
        }
        if (stream_index == start_index)
            return;                     /* full circle: no alternative found */
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch (codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2734

    
2735

    
2736
static void toggle_full_screen(void)
2737
{
2738
    is_full_screen = !is_full_screen;
2739
    if (!fs_screen_width) {
2740
        /* use default SDL method */
2741
//        SDL_WM_ToggleFullScreen(screen);
2742
    }
2743
    video_open(cur_stream);
2744
}
2745

    
2746
static void toggle_pause(void)
2747
{
2748
    if (cur_stream)
2749
        stream_pause(cur_stream);
2750
    step = 0;
2751
}
2752

    
2753
static void step_to_next_frame(void)
2754
{
2755
    if (cur_stream) {
2756
        /* if the stream is paused unpause it, then step */
2757
        if (cur_stream->paused)
2758
            stream_pause(cur_stream);
2759
    }
2760
    step = 1;
2761
}
2762

    
2763
static void toggle_audio_display(void)
2764
{
2765
    if (cur_stream) {
2766
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2767
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2768
        fill_rectangle(screen,
2769
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2770
                    bgcolor);
2771
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2772
    }
2773
}
2774

    
2775
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and the
 * application-defined FF_* events.  Never returns; do_exit() terminates
 * the process. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; the increment is in seconds and is
             * converted to bytes below when seeking by byte position */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* pick the best-known current byte position:
                         * video packet pos, else audio packet pos, else
                         * the raw I/O offset */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert seconds to bytes via the bit rate,
                         * falling back to an arbitrary 180 kB/s estimate */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click seeks just like a dragged motion */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only drag (button held) seeks, not plain motion */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the horizontal click position to a fraction of the
                 * file, by bytes when the duration is unknown */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* duration is relative to start_time, so offset the target */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)allocate the picture on the
             * main thread, which owns the SDL surface */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2921

    
2922
static void opt_frame_size(const char *arg)
2923
{
2924
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2925
        fprintf(stderr, "Incorrect frame size\n");
2926
        exit(1);
2927
    }
2928
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2929
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2930
        exit(1);
2931
    }
2932
}
2933

    
2934
/* "-x": force the displayed width in pixels (1..INT_MAX). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2939

    
2940
/* "-y": force the displayed height in pixels (1..INT_MAX). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2945

    
2946
static void opt_format(const char *arg)
2947
{
2948
    file_iformat = av_find_input_format(arg);
2949
    if (!file_iformat) {
2950
        fprintf(stderr, "Unknown input format: %s\n", arg);
2951
        exit(1);
2952
    }
2953
}
2954

    
2955
static void opt_frame_pix_fmt(const char *arg)
2956
{
2957
    frame_pix_fmt = av_get_pix_fmt(arg);
2958
}
2959

    
2960
static int opt_sync(const char *opt, const char *arg)
2961
{
2962
    if (!strcmp(arg, "audio"))
2963
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2964
    else if (!strcmp(arg, "video"))
2965
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2966
    else if (!strcmp(arg, "ext"))
2967
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2968
    else {
2969
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2970
        exit(1);
2971
    }
2972
    return 0;
2973
}
2974

    
2975
/* "-ss": seek to the given start position (time string; exits on parse error). */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2980

    
2981
/* "-t": limit playback to the given duration (time string; exits on parse error). */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
2986

    
2987
/* "-debug": set codec debug flags and raise the log level to maximum. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2993

    
2994
/* "-vismv": set the motion-vector visualization bit mask. */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2999

    
3000
/* "-threads": set the decoder thread count; warns when FFmpeg was built
 * without real thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
3008

    
3009
/* Command-line option table, consumed by parse_options()/show_help().
 * Each entry: name, flags, value (function pointer or variable address),
 * help text, and optional argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },  /* table terminator */
};
3055

    
3056
/* Print the one-line program description and usage synopsis. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3062

    
3063
/* Print full help: usage, main/advanced options, per-library AVOptions
 * and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    /* codec- and format-level AVOptions */
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* swscale options are only relevant without libavfilter */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3096

    
3097
static void opt_input_file(const char *filename)
3098
{
3099
    if (input_filename) {
3100
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3101
                filename, input_filename);
3102
        exit(1);
3103
    }
3104
    if (!strcmp(filename, "-"))
3105
        filename = "pipe:";
3106
    input_filename = filename;
3107
}
3108

    
3109
/* Called from the main */
3110
int main(int argc, char **argv)
3111
{
3112
    int flags;
3113

    
3114
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
3115

    
3116
    /* register all codecs, demux and protocols */
3117
    avcodec_register_all();
3118
#if CONFIG_AVDEVICE
3119
    avdevice_register_all();
3120
#endif
3121
#if CONFIG_AVFILTER
3122
    avfilter_register_all();
3123
#endif
3124
    av_register_all();
3125

    
3126
    init_opts();
3127

    
3128
    show_banner();
3129

    
3130
    parse_options(argc, argv, options, opt_input_file);
3131

    
3132
    if (!input_filename) {
3133
        show_usage();
3134
        fprintf(stderr, "An input file must be specified\n");
3135
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3136
        exit(1);
3137
    }
3138

    
3139
    if (display_disable) {
3140
        video_disable = 1;
3141
    }
3142
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3143
#if !defined(__MINGW32__) && !defined(__APPLE__)
3144
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3145
#endif
3146
    if (SDL_Init (flags)) {
3147
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3148
        exit(1);
3149
    }
3150

    
3151
    if (!display_disable) {
3152
#if HAVE_SDL_VIDEO_SIZE
3153
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3154
        fs_screen_width = vi->current_w;
3155
        fs_screen_height = vi->current_h;
3156
#endif
3157
    }
3158

    
3159
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3160
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3161
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3162

    
3163
    av_init_packet(&flush_pkt);
3164
    flush_pkt.data= "FLUSH";
3165

    
3166
    cur_stream = stream_open(input_filename, file_iformat);
3167

    
3168
    event_loop();
3169

    
3170
    /* never returns */
3171

    
3172
    return 0;
3173
}