Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 91b27e49

History | View | Annotate | Download (98.4 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavutil/imgutils.h"
32
#include "libavutil/parseutils.h"
33
#include "libavutil/samplefmt.h"
34
#include "libavutil/avassert.h"
35
#include "libavformat/avformat.h"
36
#include "libavdevice/avdevice.h"
37
#include "libswscale/swscale.h"
38
#include "libavcodec/audioconvert.h"
39
#include "libavcodec/opt.h"
40
#include "libavcodec/avfft.h"
41

    
42
#if CONFIG_AVFILTER
43
# include "libavfilter/avfilter.h"
44
# include "libavfilter/avfiltergraph.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG
63
//#define DEBUG_SYNC
64

    
65
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67
#define MIN_FRAMES 5
68

    
69
/* SDL audio buffer size, in samples. Should be small to have precise
70
   A/V sync as SDL does not have hardware buffer fullness info. */
71
#define SDL_AUDIO_BUFFER_SIZE 1024
72

    
73
/* no AV sync correction is done if below the AV sync threshold */
74
#define AV_SYNC_THRESHOLD 0.01
75
/* no AV correction is done if too big error */
76
#define AV_NOSYNC_THRESHOLD 10.0
77

    
78
#define FRAME_SKIP_FACTOR 0.05
79

    
80
/* maximum audio speed change to get correct sync */
81
#define SAMPLE_CORRECTION_PERCENT_MAX 10
82

    
83
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84
#define AUDIO_DIFF_AVG_NB   20
85

    
86
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
87
#define SAMPLE_ARRAY_SIZE (2*65536)
88

    
89
static int sws_flags = SWS_BICUBIC;
90

    
91
/* Thread-safe FIFO of demuxed packets shared between the read thread
   and a decoder thread. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head/tail of the singly linked packet list */
    int nb_packets;                     /* number of packets currently queued */
    int size;                           /* total bytes queued (payload + list-node overhead) */
    int abort_request;                  /* when set, blocked getters return immediately */
    SDL_mutex *mutex;                   /* protects all of the fields above */
    SDL_cond *cond;                     /* signalled on put and on abort */
} PacketQueue;
99

    
100
#define VIDEO_PICTURE_QUEUE_SIZE 2
101
#define SUBPICTURE_QUEUE_SIZE 4
102

    
103
/* One slot of the decoded-picture queue that feeds the display. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixel data
    int width, height; /* source height & width */
    int allocated;                               ///<non-zero once bmp has been allocated
    enum PixelFormat pix_fmt;                    ///<pixel format of the source picture

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtergraph buffer reference backing this picture
#endif
} VideoPicture;
116

    
117
/* One decoded subtitle together with its display timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
121

    
122
/* Which clock is the synchronisation master (see av_sync_type). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
127

    
128
/* Complete state of one open media stream: threads, demuxer context,
   per-media packet queues, clocks and display parameters.
   One instance exists per played file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demux/read thread — presumably created elsewhere in this file; verify */
    SDL_Thread *video_tid;      /* video decoding thread */
    SDL_Thread *refresh_tid;    /* thread running refresh_thread() */
    AVInputFormat *iformat;
    int no_background;          /* non-zero skips background fill in video_image_display() */
    int abort_request;          /* set to make worker threads exit */
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* index of the selected audio stream */

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding state --- */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* points at audio_buf1 or audio_buf2 — TODO confirm against audio path */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* --- audio visualisation (waveform / spectrum) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;          /* real DFT context for the spectrum display */
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current column of the scrolling spectrum */

    /* --- subtitle state --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video state --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;  /* current display geometry inside the SDL surface */

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;                /* a refresh event is pending (see refresh_thread) */
} VideoState;
220

    
221
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;         /* full-screen size override */
static int fs_screen_height;
static int screen_width = 0;        /* windowed size override (0 = use stream size) */
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[AVMEDIA_TYPE_NB]={  /* -1 = auto-select stream of each type */
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int exit_on_keydown;
static int exit_on_mousedown;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20;            /* spectrum refresh period, in ms (see refresh_thread) */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() of the last audio callback */

/* sentinel packet pushed into the queues (e.g. at init) to mark a flush point */
static AVPacket flush_pkt;

/* custom SDL user events consumed by the main event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;         /* the SDL output surface (see video_open) */

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
/* Prepare a PacketQueue for use: zero all fields, create its lock and
 * condition variable, then prime it with the global flush packet. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
297

    
298
/* Drop every queued packet, freeing both the packet payloads and the
 * list nodes, and reset the queue counters. Thread-safe. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *entry, *next;

    SDL_LockMutex(q->mutex);
    entry = q->first_pkt;
    while (entry) {
        next = entry->next;
        av_free_packet(&entry->pkt);
        av_freep(&entry);
        entry = next;
    }
    q->last_pkt   = NULL;
    q->first_pkt  = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
314

    
315
/* Destroy a queue: drop any remaining packets and release its SDL
   mutex and condition variable. The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
321

    
322
/* Append a packet to the queue and wake one waiting consumer.
 * The flush sentinel is enqueued as-is; every other packet is
 * duplicated first so the queue owns its data.
 * Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(*entry));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
353

    
354
/* Request abort: wake any thread blocked in packet_queue_get() and make
   subsequent gets return -1 immediately. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367
{
368
    AVPacketList *pkt1;
369
    int ret;
370

    
371
    SDL_LockMutex(q->mutex);
372

    
373
    for(;;) {
374
        if (q->abort_request) {
375
            ret = -1;
376
            break;
377
        }
378

    
379
        pkt1 = q->first_pkt;
380
        if (pkt1) {
381
            q->first_pkt = pkt1->next;
382
            if (!q->first_pkt)
383
                q->last_pkt = NULL;
384
            q->nb_packets--;
385
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
386
            *pkt = pkt1->pkt;
387
            av_free(pkt1);
388
            ret = 1;
389
            break;
390
        } else if (!block) {
391
            ret = 0;
392
            break;
393
        } else {
394
            SDL_CondWait(q->cond, q->mutex);
395
        }
396
    }
397
    SDL_UnlockMutex(q->mutex);
398
    return ret;
399
}
400

    
401
/* Fill an axis-aligned rectangle of the surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { x, y, w, h };

    SDL_FillRect(screen, &rect, color);
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
#define ALPHA_BLEND(a, oldp, newp, s)\
451
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
452

    
453
#define RGBA_IN(r, g, b, a, s)\
454
{\
455
    unsigned int v = ((const uint32_t *)(s))[0];\
456
    a = (v >> 24) & 0xff;\
457
    r = (v >> 16) & 0xff;\
458
    g = (v >> 8) & 0xff;\
459
    b = v & 0xff;\
460
}
461

    
462
#define YUVA_IN(y, u, v, a, s, pal)\
463
{\
464
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
465
    a = (val >> 24) & 0xff;\
466
    y = (val >> 16) & 0xff;\
467
    u = (val >> 8) & 0xff;\
468
    v = val & 0xff;\
469
}
470

    
471
#define YUVA_OUT(d, y, u, v, a)\
472
{\
473
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
474
}
475

    
476

    
477
#define BPP 1
478

    
479
/* Alpha-blend one PAL8 subtitle rectangle onto a YUV 4:2:0 destination
 * picture. The rectangle is clipped to the image; luma is blended per
 * pixel while chroma is blended per 2x2 (or smaller, at odd edges)
 * block using the summed contributions u1/v1/a1 and a matching shift
 * in ALPHA_BLEND.
 *
 * Fix: in the odd-height pixel-pair loop the chroma blend used the
 * second sample's u/v instead of the accumulated u1/v1, halving the
 * blended chroma intensity on the last row (u1/v1 were computed but
 * never used there, unlike every other pair loop in this function). */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma rows are shared with the row above */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: full 2x2 blocks (two luma rows per chroma row) */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* 4-sample sums, hence shift 2 */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* FIX: blend the accumulated 2-sample sums u1/v1 (was u/v) */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
678

    
679
/* Release the decoded subtitle data held by a SubPicture slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683

    
684
/* Display the picture at the read index of the picture queue on the SDL
   screen, blending any due subtitle into the overlay first and
   letterboxing to preserve the source aspect ratio. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown sample aspect ratio: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's display time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* the overlay stores its chroma planes in the
                       opposite order (plane 1 <-> plane 2), hence the
                       swapped indices when building the AVPicture view */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the picture into the window, keeping aspect, even sizes */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
771

    
772
/* Euclidean-style modulo for the sample ring buffer: like a % b, but a
 * negative a is shifted up by b (so for a < 0 with a % b == 0 the
 * result is b, matching the original's behavior). */
static inline int compute_mod(int a, int b)
{
    int m = a % b;

    if (a < 0)
        m += b;
    return m;
}
776

    
777
/* Draw the audio visualisation: either a per-channel waveform
 * (show_audio == 1) or a scrolling RDFT spectrum (one column per call).
 * The display index is centred on the samples currently being output,
 * estimated from the hardware buffer fullness and the time elapsed
 * since the last audio callback.
 *
 * Fix: time_diff was declared int16_t while it holds a difference of
 * av_gettime() microsecond timestamps (int64_t); anything above ~32 ms
 * overflowed and produced a garbage delay. Use int64_t, as the
 * subsequent 64-bit multiply by the sample rate expects. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest FFT size whose output covers the window height */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing with the best score to stabilise
               the waveform start position */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform display */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator line between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrum display: (re)allocate the RDFT when the size changed */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* parabolic window over the analysis frame */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* advance (and wrap) the scrolling column */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
915

    
916
/* (Re)create the SDL output surface. Size priority: forced full-screen
   size, forced windowed size, filtergraph/codec picture size, 640x480.
   Returns 0 on success (or if the surface already matches), -1 if the
   video mode cannot be set. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size actually granted by SDL */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
965

    
966
/* display the current picture, if any */
967
static void video_display(VideoState *is)
968
{
969
    if(!screen)
970
        video_open(cur_stream);
971
    if (is->audio_st && is->show_audio)
972
        video_audio_display(is);
973
    else if (is->video_st)
974
        video_image_display(is);
975
}
976

    
977
/* Background thread that periodically pushes an FF_REFRESH_EVENT so the
 * main event loop redraws the display.  The is->refresh flag coalesces
 * events: a new one is only queued once the previous one was consumed.
 * Runs until is->abort_request is set; always returns 0. */
static int refresh_thread(void *opaque)
{
    VideoState *is= opaque;
    while(!is->abort_request){
        SDL_Event event;
        event.type = FF_REFRESH_EVENT;
        event.user.data1 = opaque;
        if(!is->refresh){
            is->refresh=1;          /* mark one refresh in flight */
            SDL_PushEvent(&event);
        }
        /* faster refresh while the audio spectrum display is active */
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
    }
    return 0;
}
992

    
993
/* get the current audio clock value */
994
static double get_audio_clock(VideoState *is)
995
{
996
    double pts;
997
    int hw_buf_size, bytes_per_sec;
998
    pts = is->audio_clock;
999
    hw_buf_size = audio_write_get_buf_size(is);
1000
    bytes_per_sec = 0;
1001
    if (is->audio_st) {
1002
        bytes_per_sec = is->audio_st->codec->sample_rate *
1003
            2 * is->audio_st->codec->channels;
1004
    }
1005
    if (bytes_per_sec)
1006
        pts -= (double)hw_buf_size / bytes_per_sec;
1007
    return pts;
1008
}
1009

    
1010
/* get the current video clock value */
1011
static double get_video_clock(VideoState *is)
1012
{
1013
    if (is->paused) {
1014
        return is->video_current_pts;
1015
    } else {
1016
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1017
    }
1018
}
1019

    
1020
/* get the current external clock value */
1021
static double get_external_clock(VideoState *is)
1022
{
1023
    int64_t ti;
1024
    ti = av_gettime();
1025
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1026
}
1027

    
1028
/* get the current master clock value */
1029
static double get_master_clock(VideoState *is)
1030
{
1031
    double val;
1032

    
1033
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1034
        if (is->video_st)
1035
            val = get_video_clock(is);
1036
        else
1037
            val = get_audio_clock(is);
1038
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1039
        if (is->audio_st)
1040
            val = get_audio_clock(is);
1041
        else
1042
            val = get_video_clock(is);
1043
    } else {
1044
        val = get_external_clock(is);
1045
    }
1046
    return val;
1047
}
1048

    
1049
/* seek in the stream */
1050
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1051
{
1052
    if (!is->seek_req) {
1053
        is->seek_pos = pos;
1054
        is->seek_rel = rel;
1055
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1056
        if (seek_by_bytes)
1057
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1058
        is->seek_req = 1;
1059
    }
1060
}
1061

    
1062
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: add the wall-clock time spent paused to frame_timer so
           the next frame is not considered late */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer honoured av_read_pause(): re-derive the current
               pts from the (frozen) drift anchor */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() is continuous across
           the pause */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1074

    
1075
/* Compute the absolute wall-clock time at which the frame with pts
 * frame_current_pts should be shown, updating is->frame_timer.
 * When video is not the master clock, the nominal inter-frame delay is
 * stretched or collapsed to pull the video clock toward the master. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;            /* video is late: show immediately */
            else if (diff >= sync_threshold)
                delay = 2 * delay;    /* video is early: hold the frame longer */
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* NOTE(review): this debug block references actual_delay, which is not
     * declared here, and diff may be unset — it would not compile if
     * DEBUG_SYNC were defined; verify before enabling */
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1115

    
1116
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: come back on a later refresh event */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the *next* picture is due, to decide on dropping */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the decoder-side skip rate and, if the
                   next frame is queued (or we are very late), drop this one */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            /* subtitle handling: drop all queued subtitles on a stream
               change, otherwise expire subtitles that have been overtaken
               by the video clock */
            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* free the head subtitle once it has ended, or once
                           the following one should already be displayed */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        /* print a status line at most every 30ms */
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1263

    
1264
/* Tear down a VideoState: stop the parse and refresh threads, release
 * every queued picture, destroy the queue synchronisation primitives and
 * free the state itself.  Must only be called once per stream. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1297

    
1298
/* Global shutdown path: close the current stream, release option and
 * filter state, shut SDL down and terminate the process.  Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");   /* finish the \r-terminated status line */
    SDL_Quit();
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1314

    
1315
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before (re)allocating */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph output link */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake up queue_picture(), which is blocked waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1358

    
1359
/**
 * Append a decoded frame to the picture queue, converting it into the
 * SDL YUV overlay of the write slot.  Blocks while the queue is full or
 * while the main thread allocates/resizes the overlay.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while display is idle: decay the skip rate */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        /* take over the filter buffer reference carried in opaque */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores planes as Y,V,U — hence the 2/1 swap */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter graph: convert to YUV420P via swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1480

    
1481
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 * @return the result of queue_picture() (0 on success, -1 on abort)
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* missing pts: extrapolate from the running video clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1510

    
1511
/* Pull the next packet from the video queue and decode it.
 * Handles the special flush packet (seek) by flushing the codec and the
 * picture queue and resetting the timing state.
 * Returns 1 when a frame was produced (pts filled in), 0 when no frame
 * is available (or the frame was skipped), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* seek happened: drop codec state and drain the picture queue */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* NOTE(review): frame_last_pts is a double; storing the int64
           sentinel AV_NOPTS_VALUE here just yields a huge negative value
           that compute_target_time() treats as "incorrect delay" —
           confirm this is the intent */
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* NOTE(review): the return value (len1) is never checked, so decode
       errors are silently ignored */
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* choose the timestamp source according to -drp */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* fractional frame skipping driven by the display thread */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1566

    
1567
#if CONFIG_AVFILTER
/* Private state of the ffplay source filter. */
typedef struct {
    VideoState *is;     /* owning player state (not owned here) */
    AVFrame *frame;     /* scratch frame reused for each decode */
    int use_dr1;        /* non-zero when direct rendering callbacks are installed */
} FilterPriv;
1573

    
1574
/* get_buffer() callback for direct rendering into filter graph buffers:
 * allocates an AVFilterBufferRef sized for the codec (including edge
 * padding) and exposes its planes to the decoder through pic.
 * Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;

    if(av_image_check_size(w, h, 0, codec))
        return -1;

    /* enlarge to aligned dimensions plus an edge border on every side */
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes are subsampled, so the edge offset shrinks accordingly */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip past the edge border so data[] points at the visible area */
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;          /* stash the ref for release/reget */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1630

    
1631
/* release_buffer() callback: drop the filter buffer reference stashed in
 * pic->opaque by input_get_buffer() and clear the plane pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}
1636

    
1637
/* reget_buffer() callback: reuse the existing filter buffer when possible,
 * allocating a fresh readable one when pic has no data yet.
 * Fails if the picture geometry or format changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    /* refresh per-packet metadata on the reused buffer */
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1657

    
1658
/* init callback of the ffplay source filter.  opaque must be the
 * VideoState; installs the direct-rendering buffer callbacks on the video
 * codec when it supports DR1.  Returns 0 on success, -1 if opaque is NULL. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;   /* let the buffer callbacks find the filter */
    if((codec->codec->capabilities & CODEC_CAP_DR1)
    ) {
        codec->flags |= CODEC_FLAG_EMU_EDGE;
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    /* NOTE(review): allocation result is not checked — a NULL frame would
       crash later in get_video_frame() */
    priv->frame = avcodec_alloc_frame();

    return 0;
}
1681

    
1682
/* uninit callback: free the scratch frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
1687

    
1688
/* request_frame callback of the source filter: decode until a frame is
 * produced, wrap it in an AVFilterBufferRef (reusing the DR1 buffer when
 * available, copying otherwise) and push it down the link.
 * Returns 0 on success, -1 when decoding is aborted. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop over skipped/empty results until a real frame (ret==1) arrives */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer; just add a reference */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    /* NOTE: pkt.pos is still valid here — av_free_packet() releases the
       payload but does not touch the pos field */
    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1720

    
1721
/* query_formats callback: the source only produces the decoder's native
 * pixel format. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1731

    
1732
/* config_props callback of the source's output link: propagate the codec
 * dimensions and the stream time base to the filter graph. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}
1743

    
1744
/* Definition of the ffplay-internal source filter: no inputs, one video
 * output fed by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1762

    
1763
/* Build the video filter graph: ffplay_input source -> (optional user
 * filter chain given by vfilters) -> YUV420P sink.  On success stores the
 * sink in is->out_video_filter.  Returns 0 on success, a negative
 * AVERROR code on failure. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        /* NOTE(review): both mallocs are unchecked; avfilter_graph_parse()
           takes ownership of the inout lists */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* "in" label = output of our source, "out" label = input of the sink */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        /* NOTE(review): this frees the caller's string through a const
           pointer — the caller must have heap-allocated it and must not
           use it afterwards; verify at the call site */
        av_freep(&vfilters);
    } else {
        /* no user chain: connect the source straight to the sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1808

    
1809
#endif  /* CONFIG_AVFILTER */
1810

    
1811
/* Video decoding thread: pulls decoded frames (either through the
 * avfilter graph or directly from the packet queue), rescales their
 * timestamps to the stream time base and hands them to the picture
 * queue via output_picture2().  Runs until the queue is aborted. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* busy-wait (10ms granularity) while playback is paused */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        /* NOTE(review): if picref comes back NULL, pts_int/pos below are
         * read uninitialized; presumably ret<=0 in that case — confirm
         * against get_filtered_video_frame(). */
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;  /* buffer ref released by the display side */
        }

        /* rescale pts from the filter output time base to the stream's */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        /* ret < 0: abort requested / fatal; ret == 0: no frame yet */
        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        /* convert pts to seconds for the clock logic */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after emitting a single frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1885

    
1886
/* Subtitle decoding thread: drains the subtitle packet queue, decodes
 * bitmap subtitles, converts their palettes from RGBA to YUVA (what the
 * blending code expects) and appends them to the bounded subpicture
 * ring buffer, blocking while it is full. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* spin at 10ms granularity while paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; < 0 means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* sentinel packet injected after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap subtitles (the only kind rendered here) */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette to YUVA in place */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1960

    
1961
/* copy samples for viewing in editor window */
1962
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1963
{
1964
    int size, len, channels;
1965

    
1966
    channels = is->audio_st->codec->channels;
1967

    
1968
    size = samples_size / sizeof(short);
1969
    while (size > 0) {
1970
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1971
        if (len > size)
1972
            len = size;
1973
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1974
        samples += len;
1975
        is->sample_array_index += len;
1976
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1977
            is->sample_array_index = 0;
1978
        size -= len;
1979
    }
1980
}
1981

    
1982
/* return the new audio buffer size (samples can be added or deleted
1983
   to get better sync if video or external master clock) */
1984
static int synchronize_audio(VideoState *is, short *samples,
1985
                             int samples_size1, double pts)
1986
{
1987
    int n, samples_size;
1988
    double ref_clock;
1989

    
1990
    n = 2 * is->audio_st->codec->channels;
1991
    samples_size = samples_size1;
1992

    
1993
    /* if not master, then we try to remove or add samples to correct the clock */
1994
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1995
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1996
        double diff, avg_diff;
1997
        int wanted_size, min_size, max_size, nb_samples;
1998

    
1999
        ref_clock = get_master_clock(is);
2000
        diff = get_audio_clock(is) - ref_clock;
2001

    
2002
        if (diff < AV_NOSYNC_THRESHOLD) {
2003
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2004
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2005
                /* not enough measures to have a correct estimate */
2006
                is->audio_diff_avg_count++;
2007
            } else {
2008
                /* estimate the A-V difference */
2009
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2010

    
2011
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2012
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2013
                    nb_samples = samples_size / n;
2014

    
2015
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2016
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2017
                    if (wanted_size < min_size)
2018
                        wanted_size = min_size;
2019
                    else if (wanted_size > max_size)
2020
                        wanted_size = max_size;
2021

    
2022
                    /* add or remove samples to correction the synchro */
2023
                    if (wanted_size < samples_size) {
2024
                        /* remove samples */
2025
                        samples_size = wanted_size;
2026
                    } else if (wanted_size > samples_size) {
2027
                        uint8_t *samples_end, *q;
2028
                        int nb;
2029

    
2030
                        /* add samples */
2031
                        nb = (samples_size - wanted_size);
2032
                        samples_end = (uint8_t *)samples + samples_size - n;
2033
                        q = samples_end + n;
2034
                        while (nb > 0) {
2035
                            memcpy(q, samples_end, n);
2036
                            q += n;
2037
                            nb -= n;
2038
                        }
2039
                        samples_size = wanted_size;
2040
                    }
2041
                }
2042
#if 0
2043
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2044
                       diff, avg_diff, samples_size - samples_size1,
2045
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2046
#endif
2047
            }
2048
        } else {
2049
            /* too big difference : may be initial PTS errors, so
2050
               reset A-V filter */
2051
            is->audio_diff_avg_count = 0;
2052
            is->audio_diff_cum = 0;
2053
        }
2054
    }
2055

    
2056
    return samples_size;
2057
}
2058

    
2059
/* decode one audio frame and returns its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* sliding window into audio_pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes; loop if nothing was output */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the sample format converter if the decoder's
               output format changed and is not the S16 SDL expects */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert audio_buf1 -> audio_buf2 if needed, and point
               audio_buf at whichever buffer holds the S16 samples */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;  /* bytes per S16 sample frame */
            /* advance the audio clock by the duration of the decoded data */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* seek flush sentinel: drop decoder state and keep reading */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2161

    
2162
/* get the amount of decoded audio not yet handed to SDL, in bytes
   (audio_buf_size/audio_buf_index are byte offsets into audio_buf).
   With SDL we cannot have more precise information about what the
   hardware has actually played. */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2168

    
2169

    
2170
/* prepare a new audio buffer: SDL audio callback.  Fills `stream` with
   `len` bytes, decoding new frames on demand, feeding the visualizer,
   and applying the A-V sync sample correction.  Outputs silence when
   decoding fails so SDL always gets a full buffer. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp used by the audio clock estimation */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill our internal buffer once it is fully consumed */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the buffer slightly for clock sync */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as SDL asked for, bounded by what we have */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2205

    
2206
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* audio: ask the decoder to downmix to at most 2 channels,
       since the SDL output path below is opened with that layout */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply all the command-line decoding knobs before opening */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* start consuming this stream and spawn its decoder thread */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);  /* start the SDL callback */
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2305

    
2306
/* Tear down one open stream: abort its packet queue, unblock and join
 * its decoder thread, release per-stream resources, and mark the
 * VideoState slot as closed.  Inverse of stream_component_open(). */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback, so audio_buf is no longer touched */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop demuxing into this stream and close the decoder */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2377

    
2378
/* since we have only one decoding thread, we can use a global
2379
   variable instead of a thread local variable */
2380
static VideoState *global_video_state;
2381

    
2382
static int decode_interrupt_cb(void)
2383
{
2384
    return (global_video_state && global_video_state->abort_request);
2385
}
2386

    
2387
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let avio abort blocking I/O when is->abort_request is set */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* demuxer hints taken from the command line */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* -1 means "auto": seek by bytes only for formats with ts discontinuities */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything, then re-enable only the streams we open below */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is gated on video_disable, not a
       dedicated subtitle flag — confirm that this is intentional */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: show the audio visualization instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop: read packets and dispatch them to the per-stream
       queues, handling pause, seek requests, EOF and looping */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network protocols (e.g. RTSP) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush the queues and push the flush sentinel so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* push an empty packet to drain the video decoder's delay */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* everything consumed: loop from the start or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* tell the event loop to quit if we failed or autoexit triggered */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2638

    
2639
/* Allocate a VideoState for the given input, create the picture/subtitle
 * queue synchronization primitives and start the demuxing thread.
 * Returns NULL on allocation or thread-creation failure. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* The decode thread could not be started: release the mutexes and
         * condition variables created above before freeing the state,
         * otherwise they leak (previously only av_free(is) was done). */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2666

    
2667
/* Cycle to the next usable stream of the given media type (audio, video
 * or subtitle), wrapping around the stream list. The current component is
 * closed and the newly found one opened. For subtitles the cycle includes
 * an extra "off" position (stream_index == -1). */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video can only be cycled if a stream is active (index >= 0);
     * subtitles may also start from the "off" state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* past the last stream: switch subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* scanned the whole list without finding a usable stream */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* only accept audio streams with known rate and channels */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2715

    
2716

    
2717
static void toggle_full_screen(void)
2718
{
2719
    is_full_screen = !is_full_screen;
2720
    if (!fs_screen_width) {
2721
        /* use default SDL method */
2722
//        SDL_WM_ToggleFullScreen(screen);
2723
    }
2724
    video_open(cur_stream);
2725
}
2726

    
2727
static void toggle_pause(void)
2728
{
2729
    if (cur_stream)
2730
        stream_pause(cur_stream);
2731
    step = 0;
2732
}
2733

    
2734
static void step_to_next_frame(void)
2735
{
2736
    if (cur_stream) {
2737
        /* if the stream is paused unpause it, then step */
2738
        if (cur_stream->paused)
2739
            stream_pause(cur_stream);
2740
    }
2741
    step = 1;
2742
}
2743

    
2744
static void toggle_audio_display(void)
2745
{
2746
    if (cur_stream) {
2747
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2748
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2749
        fill_rectangle(screen,
2750
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2751
                    bgcolor);
2752
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2753
    }
2754
}
2755

    
2756
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize and custom
 * FF_* events. Runs forever; termination goes through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seeks; increment in seconds (or scaled
             * to bytes below when seeking by bytes) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* take the best known current byte position:
                         * video frame pos, else audio packet pos, else
                         * the raw I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the seconds increment to bytes using the
                         * container bitrate, with a fallback estimate */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click is handled like motion-while-pressed */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            /* seek proportionally to the horizontal position in the window */
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread to (re)allocate the picture on
             * the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            /* NOTE(review): cur_stream is dereferenced here without a NULL
             * check, unlike the other cases — relies on no refresh events
             * being posted before a stream is open; verify */
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2902

    
2903
static void opt_frame_size(const char *arg)
2904
{
2905
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2906
        fprintf(stderr, "Incorrect frame size\n");
2907
        exit(1);
2908
    }
2909
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2910
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2911
        exit(1);
2912
    }
2913
}
2914

    
2915
/* Option "-x": force the displayed window width in pixels (1..INT_MAX). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2920

    
2921
/* Option "-y": force the displayed window height in pixels (1..INT_MAX). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2926

    
2927
static void opt_format(const char *arg)
2928
{
2929
    file_iformat = av_find_input_format(arg);
2930
    if (!file_iformat) {
2931
        fprintf(stderr, "Unknown input format: %s\n", arg);
2932
        exit(1);
2933
    }
2934
}
2935

    
2936
/* Option "-pix_fmt": select the frame pixel format by name.
 * NOTE(review): av_get_pix_fmt() yields a "none" value for an unknown
 * name and that case is not rejected here — confirm downstream handling. */
static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}
2940

    
2941
static int opt_sync(const char *opt, const char *arg)
2942
{
2943
    if (!strcmp(arg, "audio"))
2944
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2945
    else if (!strcmp(arg, "video"))
2946
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2947
    else if (!strcmp(arg, "ext"))
2948
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2949
    else {
2950
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2951
        exit(1);
2952
    }
2953
    return 0;
2954
}
2955

    
2956
/* Option "-ss": set the playback start position (time string, fatal on
 * parse error). */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
2961

    
2962
/* Option "-t": set the maximum playback duration (time string, fatal on
 * parse error). */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
2967

    
2968
/* Option "-debug": raise the log level to maximum and store the parsed
 * value in the global 'debug'. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
2974

    
2975
/* Option "-vismv": store the motion-vector visualization flags in the
 * global 'debug_mv'. */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
2980

    
2981
/* Option "-threads": set the decoder thread count; warns when the build
 * has no real thread support. */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
2989

    
2990
/* Command-line option table consumed by parse_options(); terminated by a
 * NULL entry. OPT_FUNC2 entries dispatch to the opt_* handlers above,
 * OPT_BOOL/OPT_INT/OPT_STRING entries write directly into globals. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display geometry */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range and seeking */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    /* debugging and decoder tuning */
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    /* behaviour on exit / UI */
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
3037

    
3038
/* Print the one-line program description and usage synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3044

    
3045
/* Print full help: usage, the option table (main + advanced sections),
 * the generic codec/format (and sws, when avfilter is disabled) options,
 * and the interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3078

    
3079
static void opt_input_file(const char *filename)
3080
{
3081
    if (input_filename) {
3082
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3083
                filename, input_filename);
3084
        exit(1);
3085
    }
3086
    if (!strcmp(filename, "-"))
3087
        filename = "pipe:";
3088
    input_filename = filename;
3089
}
3090

    
3091
/* Called from the main */
3092
int main(int argc, char **argv)
3093
{
3094
    int flags;
3095

    
3096
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
3097

    
3098
    /* register all codecs, demux and protocols */
3099
    avcodec_register_all();
3100
#if CONFIG_AVDEVICE
3101
    avdevice_register_all();
3102
#endif
3103
#if CONFIG_AVFILTER
3104
    avfilter_register_all();
3105
#endif
3106
    av_register_all();
3107

    
3108
    init_opts();
3109

    
3110
    show_banner();
3111

    
3112
    parse_options(argc, argv, options, opt_input_file);
3113

    
3114
    if (!input_filename) {
3115
        show_usage();
3116
        fprintf(stderr, "An input file must be specified\n");
3117
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3118
        exit(1);
3119
    }
3120

    
3121
    if (display_disable) {
3122
        video_disable = 1;
3123
    }
3124
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3125
#if !defined(__MINGW32__) && !defined(__APPLE__)
3126
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3127
#endif
3128
    if (SDL_Init (flags)) {
3129
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3130
        exit(1);
3131
    }
3132

    
3133
    if (!display_disable) {
3134
#if HAVE_SDL_VIDEO_SIZE
3135
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3136
        fs_screen_width = vi->current_w;
3137
        fs_screen_height = vi->current_h;
3138
#endif
3139
    }
3140

    
3141
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3142
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3143
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3144

    
3145
    av_init_packet(&flush_pkt);
3146
    flush_pkt.data= "FLUSH";
3147

    
3148
    cur_stream = stream_open(input_filename, file_iformat);
3149

    
3150
    event_loop();
3151

    
3152
    /* never returns */
3153

    
3154
    return 0;
3155
}