ffmpeg / ffplay.c @ dc172ecc
1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavutil/imgutils.h"
32
#include "libavutil/parseutils.h"
33
#include "libavutil/samplefmt.h"
34
#include "libavutil/avassert.h"
35
#include "libavformat/avformat.h"
36
#include "libavdevice/avdevice.h"
37
#include "libswscale/swscale.h"
38
#include "libavcodec/audioconvert.h"
39
#include "libavcodec/opt.h"
40
#include "libavcodec/avfft.h"
41

    
42
#if CONFIG_AVFILTER
43
# include "libavfilter/avfilter.h"
44
# include "libavfilter/avfiltergraph.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG
63
//#define DEBUG_SYNC
64

    
65
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67
#define MIN_FRAMES 5
68

    
69
/* SDL audio buffer size, in samples. Should be small to have precise
70
   A/V sync as SDL does not have hardware buffer fullness info. */
71
#define SDL_AUDIO_BUFFER_SIZE 1024
72

    
73
/* no AV sync correction is done if below the AV sync threshold */
74
#define AV_SYNC_THRESHOLD 0.01
75
/* no AV correction is done if the error is too big */
76
#define AV_NOSYNC_THRESHOLD 10.0
77

    
78
#define FRAME_SKIP_FACTOR 0.05
79

    
80
/* maximum audio speed change to get correct sync */
81
#define SAMPLE_CORRECTION_PERCENT_MAX 10
82

    
83
/* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
84
#define AUDIO_DIFF_AVG_NB   20
85

    
86
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
87
#define SAMPLE_ARRAY_SIZE (2*65536)
88

    
89
static int sws_flags = SWS_BICUBIC;
90

    
91
typedef struct PacketQueue {
92
    AVPacketList *first_pkt, *last_pkt;
93
    int nb_packets;
94
    int size;
95
    int abort_request;
96
    SDL_mutex *mutex;
97
    SDL_cond *cond;
98
} PacketQueue;
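/* PacketQueue: a mutex/condition-variable protected FIFO of demuxed AVPackets.
 * nb_packets and size (total bytes queued, including list overhead) let the read
 * thread bound memory use; abort_request wakes up and unblocks any waiting consumer.
 * Illustrative usage sketch (not code that appears in ffplay itself):
 *     PacketQueue q;
 *     packet_queue_init(&q);
 *     packet_queue_put(&q, &pkt);      // demuxing side
 *     packet_queue_get(&q, &pkt, 1);   // decoding side, blocking
 *     packet_queue_abort(&q);          // on shutdown, then packet_queue_end(&q)
 */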
99

    
100
#define VIDEO_PICTURE_QUEUE_SIZE 2
101
#define SUBPICTURE_QUEUE_SIZE 4
102

    
103
typedef struct VideoPicture {
104
    double pts;                                  ///<presentation time stamp for this picture
105
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106
    int64_t pos;                                 ///<byte position in file
107
    SDL_Overlay *bmp;
108
    int width, height; /* source height & width */
109
    int allocated;
110
    enum PixelFormat pix_fmt;
111

    
112
#if CONFIG_AVFILTER
113
    AVFilterBufferRef *picref;
114
#endif
115
} VideoPicture;
116

    
117
typedef struct SubPicture {
118
    double pts; /* presentation time stamp for this picture */
119
    AVSubtitle sub;
120
} SubPicture;
121

    
122
enum {
123
    AV_SYNC_AUDIO_MASTER, /* default choice */
124
    AV_SYNC_VIDEO_MASTER,
125
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126
};
127

    
128
typedef struct VideoState {
129
    SDL_Thread *parse_tid;
130
    SDL_Thread *video_tid;
131
    SDL_Thread *refresh_tid;
132
    AVInputFormat *iformat;
133
    int no_background;
134
    int abort_request;
135
    int paused;
136
    int last_paused;
137
    int seek_req;
138
    int seek_flags;
139
    int64_t seek_pos;
140
    int64_t seek_rel;
141
    int read_pause_return;
142
    AVFormatContext *ic;
143
    int dtg_active_format;
144

    
145
    int audio_stream;
146

    
147
    int av_sync_type;
148
    double external_clock; /* external clock base */
149
    int64_t external_clock_time;
150

    
151
    double audio_clock;
152
    double audio_diff_cum; /* used for AV difference average computation */
153
    double audio_diff_avg_coef;
154
    double audio_diff_threshold;
155
    int audio_diff_avg_count;
156
    AVStream *audio_st;
157
    PacketQueue audioq;
158
    int audio_hw_buf_size;
159
    /* samples output by the codec. we reserve more space for avsync
160
       compensation */
161
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
163
    uint8_t *audio_buf;
164
    unsigned int audio_buf_size; /* in bytes */
165
    int audio_buf_index; /* in bytes */
166
    AVPacket audio_pkt_temp;
167
    AVPacket audio_pkt;
168
    enum AVSampleFormat audio_src_fmt;
169
    AVAudioConvert *reformat_ctx;
170

    
171
    int show_audio; /* if true, display audio samples */
172
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
173
    int sample_array_index;
174
    int last_i_start;
175
    RDFTContext *rdft;
176
    int rdft_bits;
177
    FFTSample *rdft_data;
178
    int xpos;
179

    
180
    SDL_Thread *subtitle_tid;
181
    int subtitle_stream;
182
    int subtitle_stream_changed;
183
    AVStream *subtitle_st;
184
    PacketQueue subtitleq;
185
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186
    int subpq_size, subpq_rindex, subpq_windex;
187
    SDL_mutex *subpq_mutex;
188
    SDL_cond *subpq_cond;
189

    
190
    double frame_timer;
191
    double frame_last_pts;
192
    double frame_last_delay;
193
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194
    int video_stream;
195
    AVStream *video_st;
196
    PacketQueue videoq;
197
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199
    int64_t video_current_pos;                   ///<current displayed file pos
200
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201
    int pictq_size, pictq_rindex, pictq_windex;
202
    SDL_mutex *pictq_mutex;
203
    SDL_cond *pictq_cond;
204
#if !CONFIG_AVFILTER
205
    struct SwsContext *img_convert_ctx;
206
#endif
207

    
208
    //    QETimer *video_timer;
209
    char filename[1024];
210
    int width, height, xleft, ytop;
211

    
212
#if CONFIG_AVFILTER
213
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214
#endif
215

    
216
    float skip_frames;
217
    float skip_frames_index;
218
    int refresh;
219
} VideoState;
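/* VideoState bundles everything ffplay keeps for one open input: the demuxer
 * context, the per-stream packet queues, the decoded picture and subtitle queues,
 * the audio/video/external clocks used for A/V sync, and the worker threads
 * (read/parse, video decode, subtitle decode, display refresh). */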
220

    
221
static void show_help(void);
222
static int audio_write_get_buf_size(VideoState *is);
223

    
224
/* options specified by the user */
225
static AVInputFormat *file_iformat;
226
static const char *input_filename;
227
static const char *window_title;
228
static int fs_screen_width;
229
static int fs_screen_height;
230
static int screen_width = 0;
231
static int screen_height = 0;
232
static int frame_width = 0;
233
static int frame_height = 0;
234
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235
static int audio_disable;
236
static int video_disable;
237
static int wanted_stream[AVMEDIA_TYPE_NB]={
238
    [AVMEDIA_TYPE_AUDIO]=-1,
239
    [AVMEDIA_TYPE_VIDEO]=-1,
240
    [AVMEDIA_TYPE_SUBTITLE]=-1,
241
};
242
static int seek_by_bytes=-1;
243
static int display_disable;
244
static int show_status = 1;
245
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246
static int64_t start_time = AV_NOPTS_VALUE;
247
static int64_t duration = AV_NOPTS_VALUE;
248
static int debug = 0;
249
static int debug_mv = 0;
250
static int step = 0;
251
static int thread_count = 1;
252
static int workaround_bugs = 1;
253
static int fast = 0;
254
static int genpts = 0;
255
static int lowres = 0;
256
static int idct = FF_IDCT_AUTO;
257
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260
static int error_recognition = FF_ER_CAREFUL;
261
static int error_concealment = 3;
262
static int decoder_reorder_pts= -1;
263
static int autoexit;
264
static int exit_on_keydown;
265
static int exit_on_mousedown;
266
static int loop=1;
267
static int framedrop=1;
268

    
269
static int rdftspeed=20;
270
#if CONFIG_AVFILTER
271
static char *vfilters = NULL;
272
#endif
273

    
274
/* current context */
275
static int is_full_screen;
276
static VideoState *cur_stream;
277
static int64_t audio_callback_time;
278

    
279
static AVPacket flush_pkt;
280

    
281
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
282
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284

    
285
static SDL_Surface *screen;
286

    
287
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
static void packet_queue_init(PacketQueue *q)
291
{
292
    memset(q, 0, sizeof(PacketQueue));
293
    q->mutex = SDL_CreateMutex();
294
    q->cond = SDL_CreateCond();
295
    packet_queue_put(q, &flush_pkt);
296
}
297

    
298
static void packet_queue_flush(PacketQueue *q)
299
{
300
    AVPacketList *pkt, *pkt1;
301

    
302
    SDL_LockMutex(q->mutex);
303
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304
        pkt1 = pkt->next;
305
        av_free_packet(&pkt->pkt);
306
        av_freep(&pkt);
307
    }
308
    q->last_pkt = NULL;
309
    q->first_pkt = NULL;
310
    q->nb_packets = 0;
311
    q->size = 0;
312
    SDL_UnlockMutex(q->mutex);
313
}
314

    
315
static void packet_queue_end(PacketQueue *q)
316
{
317
    packet_queue_flush(q);
318
    SDL_DestroyMutex(q->mutex);
319
    SDL_DestroyCond(q->cond);
320
}
321

    
322
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323
{
324
    AVPacketList *pkt1;
325

    
326
    /* duplicate the packet */
327
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328
        return -1;
329

    
330
    pkt1 = av_malloc(sizeof(AVPacketList));
331
    if (!pkt1)
332
        return -1;
333
    pkt1->pkt = *pkt;
334
    pkt1->next = NULL;
335

    
336

    
337
    SDL_LockMutex(q->mutex);
338

    
339
    if (!q->last_pkt)
340

    
341
        q->first_pkt = pkt1;
342
    else
343
        q->last_pkt->next = pkt1;
344
    q->last_pkt = pkt1;
345
    q->nb_packets++;
346
    q->size += pkt1->pkt.size + sizeof(*pkt1);
347
    /* XXX: should duplicate packet data in DV case */
348
    SDL_CondSignal(q->cond);
349

    
350
    SDL_UnlockMutex(q->mutex);
351
    return 0;
352
}
353

    
354
static void packet_queue_abort(PacketQueue *q)
355
{
356
    SDL_LockMutex(q->mutex);
357

    
358
    q->abort_request = 1;
359

    
360
    SDL_CondSignal(q->cond);
361

    
362
    SDL_UnlockMutex(q->mutex);
363
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367
{
368
    AVPacketList *pkt1;
369
    int ret;
370

    
371
    SDL_LockMutex(q->mutex);
372

    
373
    for(;;) {
374
        if (q->abort_request) {
375
            ret = -1;
376
            break;
377
        }
378

    
379
        pkt1 = q->first_pkt;
380
        if (pkt1) {
381
            q->first_pkt = pkt1->next;
382
            if (!q->first_pkt)
383
                q->last_pkt = NULL;
384
            q->nb_packets--;
385
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
386
            *pkt = pkt1->pkt;
387
            av_free(pkt1);
388
            ret = 1;
389
            break;
390
        } else if (!block) {
391
            ret = 0;
392
            break;
393
        } else {
394
            SDL_CondWait(q->cond, q->mutex);
395
        }
396
    }
397
    SDL_UnlockMutex(q->mutex);
398
    return ret;
399
}
400

    
401
static inline void fill_rectangle(SDL_Surface *screen,
402
                                  int x, int y, int w, int h, int color)
403
{
404
    SDL_Rect rect;
405
    rect.x = x;
406
    rect.y = y;
407
    rect.w = w;
408
    rect.h = h;
409
    SDL_FillRect(screen, &rect, color);
410
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
#define ALPHA_BLEND(a, oldp, newp, s)\
451
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
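/* ALPHA_BLEND mixes newp over oldp with alpha a. The shift s scales oldp by 1<<s
 * inside the macro, so callers may pass newp as a *sum* of 1<<s samples; this is
 * how blend_subrect() averages two or four subtitle pixels into one chroma sample,
 * e.g. cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1). */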
452

    
453
#define RGBA_IN(r, g, b, a, s)\
454
{\
455
    unsigned int v = ((const uint32_t *)(s))[0];\
456
    a = (v >> 24) & 0xff;\
457
    r = (v >> 16) & 0xff;\
458
    g = (v >> 8) & 0xff;\
459
    b = v & 0xff;\
460
}
461

    
462
#define YUVA_IN(y, u, v, a, s, pal)\
463
{\
464
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
465
    a = (val >> 24) & 0xff;\
466
    y = (val >> 16) & 0xff;\
467
    u = (val >> 8) & 0xff;\
468
    v = val & 0xff;\
469
}
470

    
471
#define YUVA_OUT(d, y, u, v, a)\
472
{\
473
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
474
}
475

    
476

    
477
#define BPP 1
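/* Subtitle bitmaps are 8-bit palettized, hence BPP == 1: each source byte is an
 * index into pal, whose 32-bit entries pack A:Y:U:V from the high byte down
 * (see YUVA_IN above and the "Now in YCrCb!" note in blend_subrect()). */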
478

    
479
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480
{
481
    int wrap, wrap3, width2, skip2;
482
    int y, u, v, a, u1, v1, a1, w, h;
483
    uint8_t *lum, *cb, *cr;
484
    const uint8_t *p;
485
    const uint32_t *pal;
486
    int dstx, dsty, dstw, dsth;
487

    
488
    dstw = av_clip(rect->w, 0, imgw);
489
    dsth = av_clip(rect->h, 0, imgh);
490
    dstx = av_clip(rect->x, 0, imgw - dstw);
491
    dsty = av_clip(rect->y, 0, imgh - dsth);
492
    lum = dst->data[0] + dsty * dst->linesize[0];
493
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495

    
496
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497
    skip2 = dstx >> 1;
498
    wrap = dst->linesize[0];
499
    wrap3 = rect->pict.linesize[0];
500
    p = rect->pict.data[0];
501
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502

    
503
    if (dsty & 1) {
504
        lum += dstx;
505
        cb += skip2;
506
        cr += skip2;
507

    
508
        if (dstx & 1) {
509
            YUVA_IN(y, u, v, a, p, pal);
510
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513
            cb++;
514
            cr++;
515
            lum++;
516
            p += BPP;
517
        }
518
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519
            YUVA_IN(y, u, v, a, p, pal);
520
            u1 = u;
521
            v1 = v;
522
            a1 = a;
523
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524

    
525
            YUVA_IN(y, u, v, a, p + BPP, pal);
526
            u1 += u;
527
            v1 += v;
528
            a1 += a;
529
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532
            cb++;
533
            cr++;
534
            p += 2 * BPP;
535
            lum += 2;
536
        }
537
        if (w) {
538
            YUVA_IN(y, u, v, a, p, pal);
539
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542
            p++;
543
            lum++;
544
        }
545
        p += wrap3 - dstw * BPP;
546
        lum += wrap - dstw - dstx;
547
        cb += dst->linesize[1] - width2 - skip2;
548
        cr += dst->linesize[2] - width2 - skip2;
549
    }
550
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
551
        lum += dstx;
552
        cb += skip2;
553
        cr += skip2;
554

    
555
        if (dstx & 1) {
556
            YUVA_IN(y, u, v, a, p, pal);
557
            u1 = u;
558
            v1 = v;
559
            a1 = a;
560
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561
            p += wrap3;
562
            lum += wrap;
563
            YUVA_IN(y, u, v, a, p, pal);
564
            u1 += u;
565
            v1 += v;
566
            a1 += a;
567
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570
            cb++;
571
            cr++;
572
            p += -wrap3 + BPP;
573
            lum += -wrap + 1;
574
        }
575
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
576
            YUVA_IN(y, u, v, a, p, pal);
577
            u1 = u;
578
            v1 = v;
579
            a1 = a;
580
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581

    
582
            YUVA_IN(y, u, v, a, p + BPP, pal);
583
            u1 += u;
584
            v1 += v;
585
            a1 += a;
586
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587
            p += wrap3;
588
            lum += wrap;
589

    
590
            YUVA_IN(y, u, v, a, p, pal);
591
            u1 += u;
592
            v1 += v;
593
            a1 += a;
594
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595

    
596
            YUVA_IN(y, u, v, a, p + BPP, pal);
597
            u1 += u;
598
            v1 += v;
599
            a1 += a;
600
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601

    
602
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604

    
605
            cb++;
606
            cr++;
607
            p += -wrap3 + 2 * BPP;
608
            lum += -wrap + 2;
609
        }
610
        if (w) {
611
            YUVA_IN(y, u, v, a, p, pal);
612
            u1 = u;
613
            v1 = v;
614
            a1 = a;
615
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
            p += wrap3;
617
            lum += wrap;
618
            YUVA_IN(y, u, v, a, p, pal);
619
            u1 += u;
620
            v1 += v;
621
            a1 += a;
622
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625
            cb++;
626
            cr++;
627
            p += -wrap3 + BPP;
628
            lum += -wrap + 1;
629
        }
630
        p += wrap3 + (wrap3 - dstw * BPP);
631
        lum += wrap + (wrap - dstw - dstx);
632
        cb += dst->linesize[1] - width2 - skip2;
633
        cr += dst->linesize[2] - width2 - skip2;
634
    }
635
    /* handle odd height */
636
    if (h) {
637
        lum += dstx;
638
        cb += skip2;
639
        cr += skip2;
640

    
641
        if (dstx & 1) {
642
            YUVA_IN(y, u, v, a, p, pal);
643
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646
            cb++;
647
            cr++;
648
            lum++;
649
            p += BPP;
650
        }
651
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
652
            YUVA_IN(y, u, v, a, p, pal);
653
            u1 = u;
654
            v1 = v;
655
            a1 = a;
656
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657

    
658
            YUVA_IN(y, u, v, a, p + BPP, pal);
659
            u1 += u;
660
            v1 += v;
661
            a1 += a;
662
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665
            cb++;
666
            cr++;
667
            p += 2 * BPP;
668
            lum += 2;
669
        }
670
        if (w) {
671
            YUVA_IN(y, u, v, a, p, pal);
672
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675
        }
676
    }
677
}
678

    
679
static void free_subpicture(SubPicture *sp)
680
{
681
    avsubtitle_free(&sp->sub);
682
}
683

    
684
static void video_image_display(VideoState *is)
685
{
686
    VideoPicture *vp;
687
    SubPicture *sp;
688
    AVPicture pict;
689
    float aspect_ratio;
690
    int width, height, x, y;
691
    SDL_Rect rect;
692
    int i;
693

    
694
    vp = &is->pictq[is->pictq_rindex];
695
    if (vp->bmp) {
696
#if CONFIG_AVFILTER
697
         if (vp->picref->video->pixel_aspect.num == 0)
698
             aspect_ratio = 0;
699
         else
700
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
701
#else
702

    
703
        /* XXX: use variable in the frame */
704
        if (is->video_st->sample_aspect_ratio.num)
705
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
706
        else if (is->video_st->codec->sample_aspect_ratio.num)
707
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
708
        else
709
            aspect_ratio = 0;
710
#endif
711
        if (aspect_ratio <= 0.0)
712
            aspect_ratio = 1.0;
713
        aspect_ratio *= (float)vp->width / (float)vp->height;
714

    
715
        if (is->subtitle_st)
716
        {
717
            if (is->subpq_size > 0)
718
            {
719
                sp = &is->subpq[is->subpq_rindex];
720

    
721
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
722
                {
723
                    SDL_LockYUVOverlay (vp->bmp);
724

    
725
                    pict.data[0] = vp->bmp->pixels[0];
726
                    pict.data[1] = vp->bmp->pixels[2];
727
                    pict.data[2] = vp->bmp->pixels[1];
728

    
729
                    pict.linesize[0] = vp->bmp->pitches[0];
730
                    pict.linesize[1] = vp->bmp->pitches[2];
731
                    pict.linesize[2] = vp->bmp->pitches[1];
732

    
733
                    for (i = 0; i < sp->sub.num_rects; i++)
734
                        blend_subrect(&pict, sp->sub.rects[i],
735
                                      vp->bmp->w, vp->bmp->h);
736

    
737
                    SDL_UnlockYUVOverlay (vp->bmp);
738
                }
739
            }
740
        }
741

    
742

    
743
        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
744
        height = is->height;
745
        width = ((int)rint(height * aspect_ratio)) & ~1;
746
        if (width > is->width) {
747
            width = is->width;
748
            height = ((int)rint(width / aspect_ratio)) & ~1;
749
        }
750
        x = (is->width - width) / 2;
751
        y = (is->height - height) / 2;
752
        if (!is->no_background) {
753
            /* fill the background */
754
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
755
        } else {
756
            is->no_background = 0;
757
        }
758
        rect.x = is->xleft + x;
759
        rect.y = is->ytop  + y;
760
        rect.w = width;
761
        rect.h = height;
762
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
763
    } else {
764
#if 0
765
        fill_rectangle(screen,
766
                       is->xleft, is->ytop, is->width, is->height,
767
                       QERGB(0x00, 0x00, 0x00));
768
#endif
769
    }
770
}
771

    
772
static inline int compute_mod(int a, int b)
773
{
774
    a = a % b;
775
    if (a >= 0)
776
        return a;
777
    else
778
        return a + b;
779
}
780

    
781
static void video_audio_display(VideoState *s)
782
{
783
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
784
    int ch, channels, h, h2, bgcolor, fgcolor;
785
    int16_t time_diff;
786
    int rdft_bits, nb_freq;
787

    
788
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
789
        ;
790
    nb_freq= 1<<(rdft_bits-1);
791

    
792
    /* compute display index : center on currently output samples */
793
    channels = s->audio_st->codec->channels;
794
    nb_display_channels = channels;
795
    if (!s->paused) {
796
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
797
        n = 2 * channels;
798
        delay = audio_write_get_buf_size(s);
799
        delay /= n;
800

    
801
        /* to be more precise, we take into account the time spent since
802
           the last buffer computation */
803
        if (audio_callback_time) {
804
            time_diff = av_gettime() - audio_callback_time;
805
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
806
        }
807

    
808
        delay += 2*data_used;
809
        if (delay < data_used)
810
            delay = data_used;
811

    
812
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
813
        if(s->show_audio==1){
814
            h= INT_MIN;
815
            for(i=0; i<1000; i+=channels){
816
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
817
                int a= s->sample_array[idx];
818
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
819
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
820
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
821
                int score= a-d;
822
                if(h<score && (b^c)<0){
823
                    h= score;
824
                    i_start= idx;
825
                }
826
            }
827
        }
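        /* The search above moves i_start to a zero crossing (b and c have opposite
         * signs) that maximizes the a-d score, so successive redraws of the waveform
         * start at a similar phase and the oscilloscope display does not jitter. */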
828

    
829
        s->last_i_start = i_start;
830
    } else {
831
        i_start = s->last_i_start;
832
    }
833

    
834
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
835
    if(s->show_audio==1){
836
        fill_rectangle(screen,
837
                       s->xleft, s->ytop, s->width, s->height,
838
                       bgcolor);
839

    
840
        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
841

    
842
        /* total height for one channel */
843
        h = s->height / nb_display_channels;
844
        /* graph height / 2 */
845
        h2 = (h * 9) / 20;
846
        for(ch = 0;ch < nb_display_channels; ch++) {
847
            i = i_start + ch;
848
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
849
            for(x = 0; x < s->width; x++) {
850
                y = (s->sample_array[i] * h2) >> 15;
851
                if (y < 0) {
852
                    y = -y;
853
                    ys = y1 - y;
854
                } else {
855
                    ys = y1;
856
                }
857
                fill_rectangle(screen,
858
                               s->xleft + x, ys, 1, y,
859
                               fgcolor);
860
                i += channels;
861
                if (i >= SAMPLE_ARRAY_SIZE)
862
                    i -= SAMPLE_ARRAY_SIZE;
863
            }
864
        }
865

    
866
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
867

    
868
        for(ch = 1;ch < nb_display_channels; ch++) {
869
            y = s->ytop + ch * h;
870
            fill_rectangle(screen,
871
                           s->xleft, y, s->width, 1,
872
                           fgcolor);
873
        }
874
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
875
    }else{
876
        nb_display_channels= FFMIN(nb_display_channels, 2);
877
        if(rdft_bits != s->rdft_bits){
878
            av_rdft_end(s->rdft);
879
            av_free(s->rdft_data);
880
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
881
            s->rdft_bits= rdft_bits;
882
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
883
        }
884
        {
885
            FFTSample *data[2];
886
            for(ch = 0;ch < nb_display_channels; ch++) {
887
                data[ch] = s->rdft_data + 2*nb_freq*ch;
888
                i = i_start + ch;
889
                for(x = 0; x < 2*nb_freq; x++) {
890
                    double w= (x-nb_freq)*(1.0/nb_freq);
891
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
892
                    i += channels;
893
                    if (i >= SAMPLE_ARRAY_SIZE)
894
                        i -= SAMPLE_ARRAY_SIZE;
895
                }
896
                av_rdft_calc(s->rdft, data[ch]);
897
            }
898
            // Least efficient way to do this; we could access the pixels directly, but it's more than fast enough.
899
            for(y=0; y<s->height; y++){
900
                double w= 1/sqrt(nb_freq);
901
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
902
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
903
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
904
                a= FFMIN(a,255);
905
                b= FFMIN(b,255);
906
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
907

    
908
                fill_rectangle(screen,
909
                            s->xpos, s->height-y, 1, 1,
910
                            fgcolor);
911
            }
912
        }
913
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
914
        s->xpos++;
915
        if(s->xpos >= s->width)
916
            s->xpos= s->xleft;
917
    }
918
}
919

    
920
static int video_open(VideoState *is){
921
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
922
    int w,h;
923

    
924
    if(is_full_screen) flags |= SDL_FULLSCREEN;
925
    else               flags |= SDL_RESIZABLE;
926

    
927
    if (is_full_screen && fs_screen_width) {
928
        w = fs_screen_width;
929
        h = fs_screen_height;
930
    } else if(!is_full_screen && screen_width){
931
        w = screen_width;
932
        h = screen_height;
933
#if CONFIG_AVFILTER
934
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
935
        w = is->out_video_filter->inputs[0]->w;
936
        h = is->out_video_filter->inputs[0]->h;
937
#else
938
    }else if (is->video_st && is->video_st->codec->width){
939
        w = is->video_st->codec->width;
940
        h = is->video_st->codec->height;
941
#endif
942
    } else {
943
        w = 640;
944
        h = 480;
945
    }
946
    if(screen && is->width == screen->w && screen->w == w
947
       && is->height== screen->h && screen->h == h)
948
        return 0;
949

    
950
#ifndef __APPLE__
951
    screen = SDL_SetVideoMode(w, h, 0, flags);
952
#else
953
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
954
    screen = SDL_SetVideoMode(w, h, 24, flags);
955
#endif
956
    if (!screen) {
957
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
958
        return -1;
959
    }
960
    if (!window_title)
961
        window_title = input_filename;
962
    SDL_WM_SetCaption(window_title, window_title);
963

    
964
    is->width = screen->w;
965
    is->height = screen->h;
966

    
967
    return 0;
968
}
969

    
970
/* display the current picture, if any */
971
static void video_display(VideoState *is)
972
{
973
    if(!screen)
974
        video_open(cur_stream);
975
    if (is->audio_st && is->show_audio)
976
        video_audio_display(is);
977
    else if (is->video_st)
978
        video_image_display(is);
979
}
980

    
981
static int refresh_thread(void *opaque)
982
{
983
    VideoState *is= opaque;
984
    while(!is->abort_request){
985
        SDL_Event event;
986
        event.type = FF_REFRESH_EVENT;
987
        event.user.data1 = opaque;
988
        if(!is->refresh){
989
            is->refresh=1;
990
            SDL_PushEvent(&event);
991
        }
992
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be pointless
993
    }
994
    return 0;
995
}
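/* refresh_thread only schedules redraws: it posts FF_REFRESH_EVENT roughly every
 * 5 ms (or every rdftspeed ms while the audio visualization is shown) and the
 * drawing itself happens when the main loop handles the event and calls
 * video_refresh_timer(). */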
996

    
997
/* get the current audio clock value */
998
static double get_audio_clock(VideoState *is)
999
{
1000
    double pts;
1001
    int hw_buf_size, bytes_per_sec;
1002
    pts = is->audio_clock;
1003
    hw_buf_size = audio_write_get_buf_size(is);
1004
    bytes_per_sec = 0;
1005
    if (is->audio_st) {
1006
        bytes_per_sec = is->audio_st->codec->sample_rate *
1007
            2 * is->audio_st->codec->channels;
1008
    }
1009
    if (bytes_per_sec)
1010
        pts -= (double)hw_buf_size / bytes_per_sec;
1011
    return pts;
1012
}
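/* The audio clock above is the pts of the last decoded audio data minus the data
 * still sitting in the SDL buffer (audio_write_get_buf_size() bytes at 2 bytes per
 * sample per channel, i.e. assuming 16-bit output), which approximates the pts of
 * the sample that is being played right now. */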
1013

    
1014
/* get the current video clock value */
1015
static double get_video_clock(VideoState *is)
1016
{
1017
    if (is->paused) {
1018
        return is->video_current_pts;
1019
    } else {
1020
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1021
    }
1022
}
1023

    
1024
/* get the current external clock value */
1025
static double get_external_clock(VideoState *is)
1026
{
1027
    int64_t ti;
1028
    ti = av_gettime();
1029
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1030
}
1031

    
1032
/* get the current master clock value */
1033
static double get_master_clock(VideoState *is)
1034
{
1035
    double val;
1036

    
1037
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1038
        if (is->video_st)
1039
            val = get_video_clock(is);
1040
        else
1041
            val = get_audio_clock(is);
1042
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1043
        if (is->audio_st)
1044
            val = get_audio_clock(is);
1045
        else
1046
            val = get_video_clock(is);
1047
    } else {
1048
        val = get_external_clock(is);
1049
    }
1050
    return val;
1051
}
1052

    
1053
/* seek in the stream */
1054
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1055
{
1056
    if (!is->seek_req) {
1057
        is->seek_pos = pos;
1058
        is->seek_rel = rel;
1059
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1060
        if (seek_by_bytes)
1061
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1062
        is->seek_req = 1;
1063
    }
1064
}
1065

    
1066
/* pause or resume the video */
1067
static void stream_pause(VideoState *is)
1068
{
1069
    if (is->paused) {
1070
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1071
        if(is->read_pause_return != AVERROR(ENOSYS)){
1072
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1073
        }
1074
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1075
    }
1076
    is->paused = !is->paused;
1077
}
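/* When resuming, frame_timer is advanced by the wall-clock time elapsed since the
 * video pts was last updated (recovered from video_current_pts_drift), so frame
 * scheduling continues without a jump, and the drift is re-anchored to "now". */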
1078

    
1079
static double compute_target_time(double frame_current_pts, VideoState *is)
1080
{
1081
    double delay, sync_threshold, diff;
1082

    
1083
    /* compute nominal delay */
1084
    delay = frame_current_pts - is->frame_last_pts;
1085
    if (delay <= 0 || delay >= 10.0) {
1086
        /* if incorrect delay, use previous one */
1087
        delay = is->frame_last_delay;
1088
    } else {
1089
        is->frame_last_delay = delay;
1090
    }
1091
    is->frame_last_pts = frame_current_pts;
1092

    
1093
    /* update delay to follow master synchronisation source */
1094
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1095
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1096
        /* if video is slave, we try to correct big delays by
1097
           duplicating or deleting a frame */
1098
        diff = get_video_clock(is) - get_master_clock(is);
1099

    
1100
        /* skip or repeat frame. We take into account the
1101
           delay to compute the threshold. I still don't know
1102
           if it is the best guess */
1103
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1104
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1105
            if (diff <= -sync_threshold)
1106
                delay = 0;
1107
            else if (diff >= sync_threshold)
1108
                delay = 2 * delay;
1109
        }
1110
    }
1111
    is->frame_timer += delay;
1112
#if defined(DEBUG_SYNC)
1113
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1114
            delay, actual_delay, frame_current_pts, -diff);
1115
#endif
1116

    
1117
    return is->frame_timer;
1118
}
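/* The returned frame_timer is an absolute deadline in seconds on the av_gettime()
 * timebase; queue_picture() stores it as vp->target_clock and video_refresh_timer()
 * will not show the picture before that time. */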
1119

    
1120
/* called to display each frame */
1121
static void video_refresh_timer(void *opaque)
1122
{
1123
    VideoState *is = opaque;
1124
    VideoPicture *vp;
1125

    
1126
    SubPicture *sp, *sp2;
1127

    
1128
    if (is->video_st) {
1129
retry:
1130
        if (is->pictq_size == 0) {
1131
            // nothing to do, no picture to display in the queue
1132
        } else {
1133
            double time= av_gettime()/1000000.0;
1134
            double next_target;
1135
            /* dequeue the picture */
1136
            vp = &is->pictq[is->pictq_rindex];
1137

    
1138
            if(time < vp->target_clock)
1139
                return;
1140
            /* update current video pts */
1141
            is->video_current_pts = vp->pts;
1142
            is->video_current_pts_drift = is->video_current_pts - time;
1143
            is->video_current_pos = vp->pos;
1144
            if(is->pictq_size > 1){
1145
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1146
                assert(nextvp->target_clock >= vp->target_clock);
1147
                next_target= nextvp->target_clock;
1148
            }else{
1149
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1150
            }
1151
            if(framedrop && time > next_target){
1152
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1153
                if(is->pictq_size > 1 || time > next_target + 0.5){
1154
                    /* update queue size and signal for next picture */
1155
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1156
                        is->pictq_rindex = 0;
1157

    
1158
                    SDL_LockMutex(is->pictq_mutex);
1159
                    is->pictq_size--;
1160
                    SDL_CondSignal(is->pictq_cond);
1161
                    SDL_UnlockMutex(is->pictq_mutex);
1162
                    goto retry;
1163
                }
1164
            }
1165

    
1166
            if(is->subtitle_st) {
1167
                if (is->subtitle_stream_changed) {
1168
                    SDL_LockMutex(is->subpq_mutex);
1169

    
1170
                    while (is->subpq_size) {
1171
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1172

    
1173
                        /* update queue size and signal for next picture */
1174
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1175
                            is->subpq_rindex = 0;
1176

    
1177
                        is->subpq_size--;
1178
                    }
1179
                    is->subtitle_stream_changed = 0;
1180

    
1181
                    SDL_CondSignal(is->subpq_cond);
1182
                    SDL_UnlockMutex(is->subpq_mutex);
1183
                } else {
1184
                    if (is->subpq_size > 0) {
1185
                        sp = &is->subpq[is->subpq_rindex];
1186

    
1187
                        if (is->subpq_size > 1)
1188
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1189
                        else
1190
                            sp2 = NULL;
1191

    
1192
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1193
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1194
                        {
1195
                            free_subpicture(sp);
1196

    
1197
                            /* update queue size and signal for next picture */
1198
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1199
                                is->subpq_rindex = 0;
1200

    
1201
                            SDL_LockMutex(is->subpq_mutex);
1202
                            is->subpq_size--;
1203
                            SDL_CondSignal(is->subpq_cond);
1204
                            SDL_UnlockMutex(is->subpq_mutex);
1205
                        }
1206
                    }
1207
                }
1208
            }
1209

    
1210
            /* display picture */
1211
            if (!display_disable)
1212
                video_display(is);
1213

    
1214
            /* update queue size and signal for next picture */
1215
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1216
                is->pictq_rindex = 0;
1217

    
1218
            SDL_LockMutex(is->pictq_mutex);
1219
            is->pictq_size--;
1220
            SDL_CondSignal(is->pictq_cond);
1221
            SDL_UnlockMutex(is->pictq_mutex);
1222
        }
1223
    } else if (is->audio_st) {
1224
        /* draw the next audio frame */
1225

    
1226
        /* if only audio stream, then display the audio bars (better
1227
           than nothing, just to test the implementation) */
1228

    
1229
        /* display picture */
1230
        if (!display_disable)
1231
            video_display(is);
1232
    }
1233
    if (show_status) {
1234
        static int64_t last_time;
1235
        int64_t cur_time;
1236
        int aqsize, vqsize, sqsize;
1237
        double av_diff;
1238

    
1239
        cur_time = av_gettime();
1240
        if (!last_time || (cur_time - last_time) >= 30000) {
1241
            aqsize = 0;
1242
            vqsize = 0;
1243
            sqsize = 0;
1244
            if (is->audio_st)
1245
                aqsize = is->audioq.size;
1246
            if (is->video_st)
1247
                vqsize = is->videoq.size;
1248
            if (is->subtitle_st)
1249
                sqsize = is->subtitleq.size;
1250
            av_diff = 0;
1251
            if (is->audio_st && is->video_st)
1252
                av_diff = get_audio_clock(is) - get_video_clock(is);
1253
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1254
                   get_master_clock(is),
1255
                   av_diff,
1256
                   FFMAX(is->skip_frames-1, 0),
1257
                   aqsize / 1024,
1258
                   vqsize / 1024,
1259
                   sqsize,
1260
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1261
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1262
            fflush(stdout);
1263
            last_time = cur_time;
1264
        }
1265
    }
1266
}
1267

    
1268
static void stream_close(VideoState *is)
1269
{
1270
    VideoPicture *vp;
1271
    int i;
1272
    /* XXX: use a special url_shutdown call to abort parse cleanly */
1273
    is->abort_request = 1;
1274
    SDL_WaitThread(is->parse_tid, NULL);
1275
    SDL_WaitThread(is->refresh_tid, NULL);
1276

    
1277
    /* free all pictures */
1278
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1279
        vp = &is->pictq[i];
1280
#if CONFIG_AVFILTER
1281
        if (vp->picref) {
1282
            avfilter_unref_buffer(vp->picref);
1283
            vp->picref = NULL;
1284
        }
1285
#endif
1286
        if (vp->bmp) {
1287
            SDL_FreeYUVOverlay(vp->bmp);
1288
            vp->bmp = NULL;
1289
        }
1290
    }
1291
    SDL_DestroyMutex(is->pictq_mutex);
1292
    SDL_DestroyCond(is->pictq_cond);
1293
    SDL_DestroyMutex(is->subpq_mutex);
1294
    SDL_DestroyCond(is->subpq_cond);
1295
#if !CONFIG_AVFILTER
1296
    if (is->img_convert_ctx)
1297
        sws_freeContext(is->img_convert_ctx);
1298
#endif
1299
    av_free(is);
1300
}
1301

    
1302
static void do_exit(void)
1303
{
1304
    if (cur_stream) {
1305
        stream_close(cur_stream);
1306
        cur_stream = NULL;
1307
    }
1308
    uninit_opts();
1309
#if CONFIG_AVFILTER
1310
    avfilter_uninit();
1311
#endif
1312
    if (show_status)
1313
        printf("\n");
1314
    SDL_Quit();
1315
    av_log(NULL, AV_LOG_QUIET, "");
1316
    exit(0);
1317
}
1318

    
1319
/* allocate a picture (this needs to be done in the main thread to avoid
1320
   potential locking problems) */
1321
static void alloc_picture(void *opaque)
1322
{
1323
    VideoState *is = opaque;
1324
    VideoPicture *vp;
1325

    
1326
    vp = &is->pictq[is->pictq_windex];
1327

    
1328
    if (vp->bmp)
1329
        SDL_FreeYUVOverlay(vp->bmp);
1330

    
1331
#if CONFIG_AVFILTER
1332
    if (vp->picref)
1333
        avfilter_unref_buffer(vp->picref);
1334
    vp->picref = NULL;
1335

    
1336
    vp->width   = is->out_video_filter->inputs[0]->w;
1337
    vp->height  = is->out_video_filter->inputs[0]->h;
1338
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1339
#else
1340
    vp->width   = is->video_st->codec->width;
1341
    vp->height  = is->video_st->codec->height;
1342
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1343
#endif
1344

    
1345
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1346
                                   SDL_YV12_OVERLAY,
1347
                                   screen);
1348
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1349
        /* SDL allocates a buffer smaller than requested if the video
1350
         * overlay hardware is unable to support the requested size. */
1351
        fprintf(stderr, "Error: the video system does not support an image\n"
1352
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1353
                        "to reduce the image size.\n", vp->width, vp->height );
1354
        do_exit();
1355
    }
1356

    
1357
    SDL_LockMutex(is->pictq_mutex);
1358
    vp->allocated = 1;
1359
    SDL_CondSignal(is->pictq_cond);
1360
    SDL_UnlockMutex(is->pictq_mutex);
1361
}
1362

    
1363
/**
1364
 *
1365
 * @param pts the dts of the pkt / pts of the frame, guessed if not known
1366
 */
1367
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1368
{
1369
    VideoPicture *vp;
1370
    int dst_pix_fmt;
1371
#if CONFIG_AVFILTER
1372
    AVPicture pict_src;
1373
#endif
1374
    /* wait until we have space to put a new picture */
1375
    SDL_LockMutex(is->pictq_mutex);
1376

    
1377
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1378
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1379

    
1380
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1381
           !is->videoq.abort_request) {
1382
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1383
    }
1384
    SDL_UnlockMutex(is->pictq_mutex);
1385

    
1386
    if (is->videoq.abort_request)
1387
        return -1;
1388

    
1389
    vp = &is->pictq[is->pictq_windex];
1390

    
1391
    /* alloc or resize hardware picture buffer */
1392
    if (!vp->bmp ||
1393
#if CONFIG_AVFILTER
1394
        vp->width  != is->out_video_filter->inputs[0]->w ||
1395
        vp->height != is->out_video_filter->inputs[0]->h) {
1396
#else
1397
        vp->width != is->video_st->codec->width ||
1398
        vp->height != is->video_st->codec->height) {
1399
#endif
1400
        SDL_Event event;
1401

    
1402
        vp->allocated = 0;
1403

    
1404
        /* the allocation must be done in the main thread to avoid
1405
           locking problems */
1406
        event.type = FF_ALLOC_EVENT;
1407
        event.user.data1 = is;
1408
        SDL_PushEvent(&event);
1409

    
1410
        /* wait until the picture is allocated */
1411
        SDL_LockMutex(is->pictq_mutex);
1412
        while (!vp->allocated && !is->videoq.abort_request) {
1413
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1414
        }
1415
        SDL_UnlockMutex(is->pictq_mutex);
1416

    
1417
        if (is->videoq.abort_request)
1418
            return -1;
1419
    }
1420

    
1421
    /* if the frame is not skipped, then display it */
1422
    if (vp->bmp) {
1423
        AVPicture pict;
1424
#if CONFIG_AVFILTER
1425
        if(vp->picref)
1426
            avfilter_unref_buffer(vp->picref);
1427
        vp->picref = src_frame->opaque;
1428
#endif
1429

    
1430
        /* get a pointer to the bitmap */
1431
        SDL_LockYUVOverlay (vp->bmp);
1432

    
1433
        dst_pix_fmt = PIX_FMT_YUV420P;
1434
        memset(&pict,0,sizeof(AVPicture));
1435
        pict.data[0] = vp->bmp->pixels[0];
1436
        pict.data[1] = vp->bmp->pixels[2];
1437
        pict.data[2] = vp->bmp->pixels[1];
1438

    
1439
        pict.linesize[0] = vp->bmp->pitches[0];
1440
        pict.linesize[1] = vp->bmp->pitches[2];
1441
        pict.linesize[2] = vp->bmp->pitches[1];
1442

    
1443
#if CONFIG_AVFILTER
1444
        pict_src.data[0] = src_frame->data[0];
1445
        pict_src.data[1] = src_frame->data[1];
1446
        pict_src.data[2] = src_frame->data[2];
1447

    
1448
        pict_src.linesize[0] = src_frame->linesize[0];
1449
        pict_src.linesize[1] = src_frame->linesize[1];
1450
        pict_src.linesize[2] = src_frame->linesize[2];
1451

    
1452
        //FIXME use direct rendering
1453
        av_picture_copy(&pict, &pict_src,
1454
                        vp->pix_fmt, vp->width, vp->height);
1455
#else
1456
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1457
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1458
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1459
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1460
        if (is->img_convert_ctx == NULL) {
1461
            fprintf(stderr, "Cannot initialize the conversion context\n");
1462
            exit(1);
1463
        }
1464
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1465
                  0, vp->height, pict.data, pict.linesize);
1466
#endif
1467
        /* update the bitmap content */
1468
        SDL_UnlockYUVOverlay(vp->bmp);
1469

    
1470
        vp->pts = pts;
1471
        vp->pos = pos;
1472

    
1473
        /* now we can update the picture count */
1474
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1475
            is->pictq_windex = 0;
1476
        SDL_LockMutex(is->pictq_mutex);
1477
        vp->target_clock= compute_target_time(vp->pts, is);
1478

    
1479
        is->pictq_size++;
1480
        SDL_UnlockMutex(is->pictq_mutex);
1481
    }
1482
    return 0;
1483
}
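/* queue_picture() blocks until a slot is free in the VIDEO_PICTURE_QUEUE_SIZE-entry
 * picture queue, asks the main thread via FF_ALLOC_EVENT to (re)create the SDL
 * overlay when the frame size changed, copies/converts the decoded frame into the
 * YV12 overlay, and stamps the entry with its pts, file position and target
 * display time. */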
1484

    
1485
/**
1486
 * compute the exact PTS for the picture if it is omitted in the stream
1487
 * @param pts1 the dts of the pkt / pts of the frame
1488
 */
1489
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1490
{
1491
    double frame_delay, pts;
1492

    
1493
    pts = pts1;
1494

    
1495
    if (pts != 0) {
1496
        /* update video clock with pts, if present */
1497
        is->video_clock = pts;
1498
    } else {
1499
        pts = is->video_clock;
1500
    }
1501
    /* update video clock for next frame */
1502
    frame_delay = av_q2d(is->video_st->codec->time_base);
1503
    /* for MPEG2, the frame can be repeated, so we update the
1504
       clock accordingly */
1505
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1506
    is->video_clock += frame_delay;
1507

    
1508
#if defined(DEBUG_SYNC) && 0
1509
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1510
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1511
#endif
1512
    return queue_picture(is, src_frame, pts, pos);
1513
}
1514

    
1515
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1516
{
1517
    int len1, got_picture, i;
1518

    
1519
    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1520
        return -1;
1521

    
1522
    if (pkt->data == flush_pkt.data) {
1523
        avcodec_flush_buffers(is->video_st->codec);
1524

    
1525
        SDL_LockMutex(is->pictq_mutex);
1526
        // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1527
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1528
            is->pictq[i].target_clock= 0;
1529
        }
1530
        while (is->pictq_size && !is->videoq.abort_request) {
1531
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1532
        }
1533
        is->video_current_pos = -1;
1534
        SDL_UnlockMutex(is->pictq_mutex);
1535

    
1536
        is->frame_last_pts = AV_NOPTS_VALUE;
1537
        is->frame_last_delay = 0;
1538
        is->frame_timer = (double)av_gettime() / 1000000.0;
1539
        is->skip_frames = 1;
1540
        is->skip_frames_index = 0;
1541
        return 0;
1542
    }
1543

    
1544
    len1 = avcodec_decode_video2(is->video_st->codec,
1545
                                 frame, &got_picture,
1546
                                 pkt);
1547

    
1548
    if (got_picture) {
1549
        if (decoder_reorder_pts == -1) {
1550
            *pts = frame->best_effort_timestamp;
1551
        } else if (decoder_reorder_pts) {
1552
            *pts = frame->pkt_pts;
1553
        } else {
1554
            *pts = frame->pkt_dts;
1555
        }
1556

    
1557
        if (*pts == AV_NOPTS_VALUE) {
1558
            *pts = 0;
1559
        }
1560

    
1561
        is->skip_frames_index += 1;
1562
        if(is->skip_frames_index >= is->skip_frames){
1563
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1564
            return 1;
1565
        }
1566

    
1567
    }
1568
    return 0;
1569
}
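/* get_video_frame() returns 1 when a frame was decoded and should be displayed,
 * 0 when nothing was produced or the frame is dropped by the skip_frames counter,
 * and -1 when the packet queue was aborted. A flush packet resets the decoder,
 * waits for the picture queue to drain and restarts the frame timer. */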
1570

    
1571
#if CONFIG_AVFILTER
1572
typedef struct {
1573
    VideoState *is;
1574
    AVFrame *frame;
1575
    int use_dr1;
1576
} FilterPriv;
1577

    
1578
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1579
{
1580
    AVFilterContext *ctx = codec->opaque;
1581
    AVFilterBufferRef  *ref;
1582
    int perms = AV_PERM_WRITE;
1583
    int i, w, h, stride[4];
1584
    unsigned edge;
1585
    int pixel_size;
1586

    
1587
    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1588

    
1589
    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1590
        perms |= AV_PERM_NEG_LINESIZES;
1591

    
1592
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1593
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1594
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1595
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1596
    }
1597
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1598

    
1599
    w = codec->width;
1600
    h = codec->height;
1601

    
1602
    if(av_image_check_size(w, h, 0, codec))
1603
        return -1;
1604

    
1605
    avcodec_align_dimensions2(codec, &w, &h, stride);
1606
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1607
    w += edge << 1;
1608
    h += edge << 1;
1609

    
1610
    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1611
        return -1;
1612

    
1613
    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1614
    ref->video->w = codec->width;
1615
    ref->video->h = codec->height;
1616
    for(i = 0; i < 4; i ++) {
1617
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1618
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1619

    
1620
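        /* advance the plane pointer past the top-left border of 'edge' pixels
           added around the buffer; the offsets are scaled down for subsampled
           chroma planes */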
        if (ref->data[i]) {
1621
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1622
        }
1623
        pic->data[i]     = ref->data[i];
1624
        pic->linesize[i] = ref->linesize[i];
1625
    }
1626
    pic->opaque = ref;
1627
    pic->age    = INT_MAX;
1628
    pic->type   = FF_BUFFER_TYPE_USER;
1629
    pic->reordered_opaque = codec->reordered_opaque;
1630
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1631
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1632
    return 0;
1633
}
1634

    
1635
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1636
{
1637
    memset(pic->data, 0, sizeof(pic->data));
1638
    avfilter_unref_buffer(pic->opaque);
1639
}
1640

    
1641
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1642
{
1643
    AVFilterBufferRef *ref = pic->opaque;
1644

    
1645
    if (pic->data[0] == NULL) {
1646
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1647
        return codec->get_buffer(codec, pic);
1648
    }
1649

    
1650
    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1651
        (codec->pix_fmt != ref->format)) {
1652
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1653
        return -1;
1654
    }
1655

    
1656
    pic->reordered_opaque = codec->reordered_opaque;
1657
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1658
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1659
    return 0;
1660
}
1661

    
1662
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1663
{
1664
    FilterPriv *priv = ctx->priv;
1665
    AVCodecContext *codec;
1666
    if(!opaque) return -1;
1667

    
1668
    priv->is = opaque;
1669
    codec    = priv->is->video_st->codec;
1670
    codec->opaque = ctx;
1671
    if((codec->codec->capabilities & CODEC_CAP_DR1)
1672
    ) {
1673
        codec->flags |= CODEC_FLAG_EMU_EDGE;
1674
        priv->use_dr1 = 1;
1675
        codec->get_buffer     = input_get_buffer;
1676
        codec->release_buffer = input_release_buffer;
1677
        codec->reget_buffer   = input_reget_buffer;
1678
        codec->thread_safe_callbacks = 1;
1679
    }
1680

    
1681
    priv->frame = avcodec_alloc_frame();
1682

    
1683
    return 0;
1684
}
1685

    
1686
static void input_uninit(AVFilterContext *ctx)
1687
{
1688
    FilterPriv *priv = ctx->priv;
1689
    av_free(priv->frame);
1690
}
1691

    
1692
static int input_request_frame(AVFilterLink *link)
1693
{
1694
    FilterPriv *priv = link->src->priv;
1695
    AVFilterBufferRef *picref;
1696
    int64_t pts = 0;
1697
    AVPacket pkt;
1698
    int ret;
1699

    
1700
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1701
        av_free_packet(&pkt);
1702
    if (ret < 0)
1703
        return -1;
1704

    
1705
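    /* with direct rendering (DR1) the decoder already wrote into a filter
       buffer, so another reference to it is enough; otherwise the frame data
       has to be copied into a newly allocated buffer */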
    if(priv->use_dr1) {
1706
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1707
    } else {
1708
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1709
        av_image_copy(picref->data, picref->linesize,
1710
                      priv->frame->data, priv->frame->linesize,
1711
                      picref->format, link->w, link->h);
1712
    }
1713
    av_free_packet(&pkt);
1714

    
1715
    picref->pts = pts;
1716
    picref->pos = pkt.pos;
1717
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1718
    avfilter_start_frame(link, picref);
1719
    avfilter_draw_slice(link, 0, link->h, 1);
1720
    avfilter_end_frame(link);
1721

    
1722
    return 0;
1723
}
1724

    
1725
static int input_query_formats(AVFilterContext *ctx)
1726
{
1727
    FilterPriv *priv = ctx->priv;
1728
    enum PixelFormat pix_fmts[] = {
1729
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1730
    };
1731

    
1732
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1733
    return 0;
1734
}
1735

    
1736
static int input_config_props(AVFilterLink *link)
1737
{
1738
    FilterPriv *priv  = link->src->priv;
1739
    AVCodecContext *c = priv->is->video_st->codec;
1740

    
1741
    link->w = c->width;
1742
    link->h = c->height;
1743
    link->time_base = priv->is->video_st->time_base;
1744

    
1745
    return 0;
1746
}
1747

    
1748
static AVFilter input_filter =
1749
{
1750
    .name      = "ffplay_input",
1751

    
1752
    .priv_size = sizeof(FilterPriv),
1753

    
1754
    .init      = input_init,
1755
    .uninit    = input_uninit,
1756

    
1757
    .query_formats = input_query_formats,
1758

    
1759
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1760
    .outputs   = (AVFilterPad[]) {{ .name = "default",
1761
                                    .type = AVMEDIA_TYPE_VIDEO,
1762
                                    .request_frame = input_request_frame,
1763
                                    .config_props  = input_config_props, },
1764
                                  { .name = NULL }},
1765
};
1766

    
1767
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1768
{
1769
    char sws_flags_str[128];
1770
    int ret;
1771
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1772
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1773
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1774
    graph->scale_sws_opts = av_strdup(sws_flags_str);
1775

    
1776
    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1777
                                            NULL, is, graph)) < 0)
1778
        goto the_end;
1779
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1780
                                            NULL, &ffsink_ctx, graph)) < 0)
1781
        goto the_end;
1782

    
1783
    if(vfilters) {
1784
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1785
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1786

    
1787
        outputs->name    = av_strdup("in");
1788
        outputs->filter_ctx = filt_src;
1789
        outputs->pad_idx = 0;
1790
        outputs->next    = NULL;
1791

    
1792
        inputs->name    = av_strdup("out");
1793
        inputs->filter_ctx = filt_out;
1794
        inputs->pad_idx = 0;
1795
        inputs->next    = NULL;
1796

    
1797
        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1798
            goto the_end;
1799
        av_freep(&vfilters);
1800
    } else {
1801
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1802
            goto the_end;
1803
    }
1804

    
1805
    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1806
        goto the_end;
1807

    
1808
    is->out_video_filter = filt_out;
1809
the_end:
1810
    return ret;
1811
}
1812

    
1813
#endif  /* CONFIG_AVFILTER */
1814

    
1815
static int video_thread(void *arg)
1816
{
1817
    VideoState *is = arg;
1818
    AVFrame *frame= avcodec_alloc_frame();
1819
    int64_t pts_int;
1820
    double pts;
1821
    int ret;
1822

    
1823
#if CONFIG_AVFILTER
1824
    AVFilterGraph *graph = avfilter_graph_alloc();
1825
    AVFilterContext *filt_out = NULL;
1826
    int64_t pos;
1827

    
1828
    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1829
        goto the_end;
1830
    filt_out = is->out_video_filter;
1831
#endif
1832

    
1833
    for(;;) {
1834
#if !CONFIG_AVFILTER
1835
        AVPacket pkt;
1836
#else
1837
        AVFilterBufferRef *picref;
1838
        AVRational tb;
1839
#endif
1840
        while (is->paused && !is->videoq.abort_request)
1841
            SDL_Delay(10);
1842
#if CONFIG_AVFILTER
1843
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1844
        if (picref) {
1845
            pts_int = picref->pts;
1846
            pos     = picref->pos;
1847
            frame->opaque = picref;
1848
        }
1849

    
1850
        if (av_cmp_q(tb, is->video_st->time_base)) {
1851
            av_unused int64_t pts1 = pts_int;
1852
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1853
            av_dlog(NULL, "video_thread(): "
1854
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1855
                    tb.num, tb.den, pts1,
1856
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1857
        }
1858
#else
1859
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1860
#endif
1861

    
1862
        if (ret < 0) goto the_end;
1863

    
1864
        if (!ret)
1865
            continue;
1866

    
1867
        pts = pts_int*av_q2d(is->video_st->time_base);
1868

    
1869
#if CONFIG_AVFILTER
1870
        ret = output_picture2(is, frame, pts, pos);
1871
#else
1872
        ret = output_picture2(is, frame, pts,  pkt.pos);
1873
        av_free_packet(&pkt);
1874
#endif
1875
        if (ret < 0)
1876
            goto the_end;
1877

    
1878
        if (step)
1879
            if (cur_stream)
1880
                stream_pause(cur_stream);
1881
    }
1882
 the_end:
1883
#if CONFIG_AVFILTER
1884
    avfilter_graph_free(&graph);
1885
#endif
1886
    av_free(frame);
1887
    return 0;
1888
}
1889

    
1890
static int subtitle_thread(void *arg)
1891
{
1892
    VideoState *is = arg;
1893
    SubPicture *sp;
1894
    AVPacket pkt1, *pkt = &pkt1;
1895
    int len1, got_subtitle;
1896
    double pts;
1897
    int i, j;
1898
    int r, g, b, y, u, v, a;
1899

    
1900
    for(;;) {
1901
        while (is->paused && !is->subtitleq.abort_request) {
1902
            SDL_Delay(10);
1903
        }
1904
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1905
            break;
1906

    
1907
        if(pkt->data == flush_pkt.data){
1908
            avcodec_flush_buffers(is->subtitle_st->codec);
1909
            continue;
1910
        }
1911
        SDL_LockMutex(is->subpq_mutex);
1912
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1913
               !is->subtitleq.abort_request) {
1914
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1915
        }
1916
        SDL_UnlockMutex(is->subpq_mutex);
1917

    
1918
        if (is->subtitleq.abort_request)
1919
            goto the_end;
1920

    
1921
        sp = &is->subpq[is->subpq_windex];
1922

    
1923
        /* NOTE: pts is the PTS of the _first_ subtitle beginning in
           this packet, if any */
1925
        pts = 0;
1926
        if (pkt->pts != AV_NOPTS_VALUE)
1927
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1928

    
1929
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1930
                                    &sp->sub, &got_subtitle,
1931
                                    pkt);
1932
//            if (len1 < 0)
1933
//                break;
1934
        if (got_subtitle && sp->sub.format == 0) {
1935
            sp->pts = pts;
1936

    
1937
            for (i = 0; i < sp->sub.num_rects; i++)
1938
            {
1939
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1940
                {
1941
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1942
                    y = RGB_TO_Y_CCIR(r, g, b);
1943
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1944
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1945
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1946
                }
1947
            }
1948

    
1949
            /* now we can update the picture count */
1950
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1951
                is->subpq_windex = 0;
1952
            SDL_LockMutex(is->subpq_mutex);
1953
            is->subpq_size++;
1954
            SDL_UnlockMutex(is->subpq_mutex);
1955
        }
1956
        av_free_packet(pkt);
1957
//        if (step)
1958
//            if (cur_stream)
1959
//                stream_pause(cur_stream);
1960
    }
1961
 the_end:
1962
    return 0;
1963
}
1964

    
1965
/* copy samples for the audio visualization (waveform/spectrum) display */
1966
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1967
{
1968
    int size, len, channels;
1969

    
1970
    channels = is->audio_st->codec->channels;
1971

    
1972
    size = samples_size / sizeof(short);
1973
    while (size > 0) {
1974
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1975
        if (len > size)
1976
            len = size;
1977
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1978
        samples += len;
1979
        is->sample_array_index += len;
1980
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1981
            is->sample_array_index = 0;
1982
        size -= len;
1983
    }
1984
}
1985

    
1986
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or the external clock is the master) */
1988
static int synchronize_audio(VideoState *is, short *samples,
1989
                             int samples_size1, double pts)
1990
{
1991
    int n, samples_size;
1992
    double ref_clock;
1993

    
1994
    n = 2 * is->audio_st->codec->channels;
1995
    samples_size = samples_size1;
1996

    
1997
    /* if not master, then we try to remove or add samples to correct the clock */
1998
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1999
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
2000
        double diff, avg_diff;
2001
        int wanted_size, min_size, max_size, nb_samples;
2002

    
2003
        ref_clock = get_master_clock(is);
2004
        diff = get_audio_clock(is) - ref_clock;
2005

    
2006
        if (diff < AV_NOSYNC_THRESHOLD) {
2007
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2008
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2009
                /* not enough measures to have a correct estimate */
2010
                is->audio_diff_avg_count++;
2011
            } else {
2012
                /* estimate the A-V difference */
2013
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2014

    
2015
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2016
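                    /* convert the clock difference (in seconds) into a byte
                       count: diff * sample_rate gives samples, and n is the
                       size of one sample frame in bytes (2 * channels) */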
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2017
                    nb_samples = samples_size / n;
2018

    
2019
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2020
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2021
                    if (wanted_size < min_size)
2022
                        wanted_size = min_size;
2023
                    else if (wanted_size > max_size)
2024
                        wanted_size = max_size;
2025

    
2026
                    /* add or remove samples to correct the synchronization */
2027
                    if (wanted_size < samples_size) {
2028
                        /* remove samples */
2029
                        samples_size = wanted_size;
2030
                    } else if (wanted_size > samples_size) {
2031
                        uint8_t *samples_end, *q;
2032
                        int nb;
2033

    
2034
                        /* add samples */
2035
                        nb = (wanted_size - samples_size); /* bytes to append by repeating the last frame */
2036
                        samples_end = (uint8_t *)samples + samples_size - n;
2037
                        q = samples_end + n;
2038
                        while (nb > 0) {
2039
                            memcpy(q, samples_end, n);
2040
                            q += n;
2041
                            nb -= n;
2042
                        }
2043
                        samples_size = wanted_size;
2044
                    }
2045
                }
2046
#if 0
2047
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2048
                       diff, avg_diff, samples_size - samples_size1,
2049
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2050
#endif
2051
            }
2052
        } else {
2053
            /* the difference is too large: it is probably due to initial PTS
               errors, so reset the A-V filter */
2055
            is->audio_diff_avg_count = 0;
2056
            is->audio_diff_cum = 0;
2057
        }
2058
    }
2059

    
2060
    return samples_size;
2061
}
2062

    
2063
/* decode one audio frame and return its uncompressed size */
2064
static int audio_decode_frame(VideoState *is, double *pts_ptr)
2065
{
2066
    AVPacket *pkt_temp = &is->audio_pkt_temp;
2067
    AVPacket *pkt = &is->audio_pkt;
2068
    AVCodecContext *dec= is->audio_st->codec;
2069
    int n, len1, data_size;
2070
    double pts;
2071

    
2072
    for(;;) {
2073
        /* NOTE: the audio packet can contain several frames */
2074
        while (pkt_temp->size > 0) {
2075
            data_size = sizeof(is->audio_buf1);
2076
            len1 = avcodec_decode_audio3(dec,
2077
                                        (int16_t *)is->audio_buf1, &data_size,
2078
                                        pkt_temp);
2079
            if (len1 < 0) {
2080
                /* if error, we skip the frame */
2081
                pkt_temp->size = 0;
2082
                break;
2083
            }
2084

    
2085
            pkt_temp->data += len1;
2086
            pkt_temp->size -= len1;
2087
            if (data_size <= 0)
2088
                continue;
2089

    
2090
            if (dec->sample_fmt != is->audio_src_fmt) {
2091
                if (is->reformat_ctx)
2092
                    av_audio_convert_free(is->reformat_ctx);
2093
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2094
                                                         dec->sample_fmt, 1, NULL, 0);
2095
                if (!is->reformat_ctx) {
2096
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2097
                        av_get_sample_fmt_name(dec->sample_fmt),
2098
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2099
                        break;
2100
                }
2101
                is->audio_src_fmt= dec->sample_fmt;
2102
            }
2103

    
2104
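            /* convert the decoded samples to interleaved signed 16-bit audio;
               istride is the input sample size in bytes, ostride is the 2-byte
               S16 output sample size */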
            if (is->reformat_ctx) {
2105
                const void *ibuf[6]= {is->audio_buf1};
2106
                void *obuf[6]= {is->audio_buf2};
2107
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2108
                int ostride[6]= {2};
2109
                int len= data_size/istride[0];
2110
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2111
                    printf("av_audio_convert() failed\n");
2112
                    break;
2113
                }
2114
                is->audio_buf= is->audio_buf2;
2115
                /* FIXME: existing code assumes that data_size equals framesize*channels*2;
                          remove this legacy cruft */
2117
                data_size= len*2;
2118
            }else{
2119
                is->audio_buf= is->audio_buf1;
2120
            }
2121

    
2122
            /* the pts is taken from the running audio clock, which is
               resynchronized from the packet pts whenever one is available */
2123
            pts = is->audio_clock;
2124
            *pts_ptr = pts;
2125
            n = 2 * dec->channels;
2126
            is->audio_clock += (double)data_size /
2127
                (double)(n * dec->sample_rate);
2128
#if defined(DEBUG_SYNC)
2129
            {
2130
                static double last_clock;
2131
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2132
                       is->audio_clock - last_clock,
2133
                       is->audio_clock, pts);
2134
                last_clock = is->audio_clock;
2135
            }
2136
#endif
2137
            return data_size;
2138
        }
2139

    
2140
        /* free the current packet */
2141
        if (pkt->data)
2142
            av_free_packet(pkt);
2143

    
2144
        if (is->paused || is->audioq.abort_request) {
2145
            return -1;
2146
        }
2147

    
2148
        /* read next packet */
2149
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2150
            return -1;
2151
        if(pkt->data == flush_pkt.data){
2152
            avcodec_flush_buffers(dec);
2153
            continue;
2154
        }
2155

    
2156
        pkt_temp->data = pkt->data;
2157
        pkt_temp->size = pkt->size;
2158

    
2159
        /* if a pts is available, update the audio clock with it */
2160
        if (pkt->pts != AV_NOPTS_VALUE) {
2161
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2162
        }
2163
    }
2164
}
2165

    
2166
/* return the number of bytes remaining in the current audio output buffer.
   With SDL, we cannot get precise information about the hardware buffer
   fullness */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
2172

    
2173

    
2174
/* prepare a new audio buffer */
2175
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2176
{
2177
    VideoState *is = opaque;
2178
    int audio_size, len1;
2179
    double pts;
2180

    
2181
    audio_callback_time = av_gettime();
2182

    
2183
    while (len > 0) {
2184
        if (is->audio_buf_index >= is->audio_buf_size) {
2185
           audio_size = audio_decode_frame(is, &pts);
2186
           if (audio_size < 0) {
2187
                /* if error, just output silence */
2188
               is->audio_buf = is->audio_buf1;
2189
               is->audio_buf_size = 1024;
2190
               memset(is->audio_buf, 0, is->audio_buf_size);
2191
           } else {
2192
               if (is->show_audio)
2193
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2194
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2195
                                              pts);
2196
               is->audio_buf_size = audio_size;
2197
           }
2198
           is->audio_buf_index = 0;
2199
        }
2200
        len1 = is->audio_buf_size - is->audio_buf_index;
2201
        if (len1 > len)
2202
            len1 = len;
2203
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2204
        len -= len1;
2205
        stream += len1;
2206
        is->audio_buf_index += len1;
2207
    }
2208
}
2209

    
2210
/* open a given stream. Return 0 if OK */
2211
static int stream_component_open(VideoState *is, int stream_index)
2212
{
2213
    AVFormatContext *ic = is->ic;
2214
    AVCodecContext *avctx;
2215
    AVCodec *codec;
2216
    SDL_AudioSpec wanted_spec, spec;
2217

    
2218
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2219
        return -1;
2220
    avctx = ic->streams[stream_index]->codec;
2221

    
2222
    /* prepare audio output */
2223
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2224
        if (avctx->channels > 0) {
2225
            avctx->request_channels = FFMIN(2, avctx->channels);
2226
        } else {
2227
            avctx->request_channels = 2;
2228
        }
2229
    }
2230

    
2231
    codec = avcodec_find_decoder(avctx->codec_id);
2232
    avctx->debug_mv = debug_mv;
2233
    avctx->debug = debug;
2234
    avctx->workaround_bugs = workaround_bugs;
2235
    avctx->lowres = lowres;
2236
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2237
    avctx->idct_algo= idct;
2238
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2239
    avctx->skip_frame= skip_frame;
2240
    avctx->skip_idct= skip_idct;
2241
    avctx->skip_loop_filter= skip_loop_filter;
2242
    avctx->error_recognition= error_recognition;
2243
    avctx->error_concealment= error_concealment;
2244
    avctx->thread_count= thread_count;
2245

    
2246
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2247

    
2248
    if (!codec ||
2249
        avcodec_open(avctx, codec) < 0)
2250
        return -1;
2251

    
2252
    /* prepare audio output */
2253
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2254
        wanted_spec.freq = avctx->sample_rate;
2255
        wanted_spec.format = AUDIO_S16SYS;
2256
        wanted_spec.channels = avctx->channels;
2257
        wanted_spec.silence = 0;
2258
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2259
        wanted_spec.callback = sdl_audio_callback;
2260
        wanted_spec.userdata = is;
2261
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2262
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2263
            return -1;
2264
        }
2265
        is->audio_hw_buf_size = spec.size;
2266
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2267
    }
2268

    
2269
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2270
    switch(avctx->codec_type) {
2271
    case AVMEDIA_TYPE_AUDIO:
2272
        is->audio_stream = stream_index;
2273
        is->audio_st = ic->streams[stream_index];
2274
        is->audio_buf_size = 0;
2275
        is->audio_buf_index = 0;
2276

    
2277
        /* init averaging filter */
2278
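        /* choose the coefficient so that the weight of a measurement decays to
           about 1% after AUDIO_DIFF_AVG_NB accumulated A-V differences */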
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2279
        is->audio_diff_avg_count = 0;
2280
        /* since we do not have precise enough audio FIFO fullness information,
           we only correct audio sync when the error is larger than this threshold */
2282
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2283

    
2284
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2285
        packet_queue_init(&is->audioq);
2286
        SDL_PauseAudio(0);
2287
        break;
2288
    case AVMEDIA_TYPE_VIDEO:
2289
        is->video_stream = stream_index;
2290
        is->video_st = ic->streams[stream_index];
2291

    
2292
//        is->video_current_pts_time = av_gettime();
2293

    
2294
        packet_queue_init(&is->videoq);
2295
        is->video_tid = SDL_CreateThread(video_thread, is);
2296
        break;
2297
    case AVMEDIA_TYPE_SUBTITLE:
2298
        is->subtitle_stream = stream_index;
2299
        is->subtitle_st = ic->streams[stream_index];
2300
        packet_queue_init(&is->subtitleq);
2301

    
2302
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2303
        break;
2304
    default:
2305
        break;
2306
    }
2307
    return 0;
2308
}
2309

    
2310
static void stream_component_close(VideoState *is, int stream_index)
2311
{
2312
    AVFormatContext *ic = is->ic;
2313
    AVCodecContext *avctx;
2314

    
2315
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2316
        return;
2317
    avctx = ic->streams[stream_index]->codec;
2318

    
2319
    switch(avctx->codec_type) {
2320
    case AVMEDIA_TYPE_AUDIO:
2321
        packet_queue_abort(&is->audioq);
2322

    
2323
        SDL_CloseAudio();
2324

    
2325
        packet_queue_end(&is->audioq);
2326
        if (is->reformat_ctx)
2327
            av_audio_convert_free(is->reformat_ctx);
2328
        is->reformat_ctx = NULL;
2329
        break;
2330
    case AVMEDIA_TYPE_VIDEO:
2331
        packet_queue_abort(&is->videoq);
2332

    
2333
        /* note: we also signal this mutex to make sure we deblock the
2334
           video thread in all cases */
2335
        SDL_LockMutex(is->pictq_mutex);
2336
        SDL_CondSignal(is->pictq_cond);
2337
        SDL_UnlockMutex(is->pictq_mutex);
2338

    
2339
        SDL_WaitThread(is->video_tid, NULL);
2340

    
2341
        packet_queue_end(&is->videoq);
2342
        break;
2343
    case AVMEDIA_TYPE_SUBTITLE:
2344
        packet_queue_abort(&is->subtitleq);
2345

    
2346
        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
2348
        SDL_LockMutex(is->subpq_mutex);
2349
        is->subtitle_stream_changed = 1;
2350

    
2351
        SDL_CondSignal(is->subpq_cond);
2352
        SDL_UnlockMutex(is->subpq_mutex);
2353

    
2354
        SDL_WaitThread(is->subtitle_tid, NULL);
2355

    
2356
        packet_queue_end(&is->subtitleq);
2357
        break;
2358
    default:
2359
        break;
2360
    }
2361

    
2362
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2363
    avcodec_close(avctx);
2364
    switch(avctx->codec_type) {
2365
    case AVMEDIA_TYPE_AUDIO:
2366
        is->audio_st = NULL;
2367
        is->audio_stream = -1;
2368
        break;
2369
    case AVMEDIA_TYPE_VIDEO:
2370
        is->video_st = NULL;
2371
        is->video_stream = -1;
2372
        break;
2373
    case AVMEDIA_TYPE_SUBTITLE:
2374
        is->subtitle_st = NULL;
2375
        is->subtitle_stream = -1;
2376
        break;
2377
    default:
2378
        break;
2379
    }
2380
}
2381

    
2382
/* since we have only one decoding thread, we can use a global
2383
   variable instead of a thread local variable */
2384
static VideoState *global_video_state;
2385

    
2386
static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
2390

    
2391
/* this thread gets the stream from the disk or the network */
2392
static int decode_thread(void *arg)
2393
{
2394
    VideoState *is = arg;
2395
    AVFormatContext *ic;
2396
    int err, i, ret;
2397
    int st_index[AVMEDIA_TYPE_NB];
2398
    AVPacket pkt1, *pkt = &pkt1;
2399
    AVFormatParameters params, *ap = &params;
2400
    int eof=0;
2401
    int pkt_in_play_range = 0;
2402

    
2403
    ic = avformat_alloc_context();
2404

    
2405
    memset(st_index, -1, sizeof(st_index));
2406
    is->video_stream = -1;
2407
    is->audio_stream = -1;
2408
    is->subtitle_stream = -1;
2409

    
2410
    global_video_state = is;
2411
    avio_set_interrupt_cb(decode_interrupt_cb);
2412

    
2413
    memset(ap, 0, sizeof(*ap));
2414

    
2415
    ap->prealloced_context = 1;
2416
    ap->width = frame_width;
2417
    ap->height= frame_height;
2418
    ap->time_base= (AVRational){1, 25};
2419
    ap->pix_fmt = frame_pix_fmt;
2420

    
2421
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2422

    
2423
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2424
    if (err < 0) {
2425
        print_error(is->filename, err);
2426
        ret = -1;
2427
        goto fail;
2428
    }
2429
    is->ic = ic;
2430

    
2431
    if(genpts)
2432
        ic->flags |= AVFMT_FLAG_GENPTS;
2433

    
2434
    err = av_find_stream_info(ic);
2435
    if (err < 0) {
2436
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2437
        ret = -1;
2438
        goto fail;
2439
    }
2440
    if(ic->pb)
2441
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2442

    
2443
    if(seek_by_bytes<0)
2444
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2445

    
2446
    /* if seeking requested, we execute it */
2447
    if (start_time != AV_NOPTS_VALUE) {
2448
        int64_t timestamp;
2449

    
2450
        timestamp = start_time;
2451
        /* add the stream start time */
2452
        if (ic->start_time != AV_NOPTS_VALUE)
2453
            timestamp += ic->start_time;
2454
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2455
        if (ret < 0) {
2456
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2457
                    is->filename, (double)timestamp / AV_TIME_BASE);
2458
        }
2459
    }
2460

    
2461
    for (i = 0; i < ic->nb_streams; i++)
2462
        ic->streams[i]->discard = AVDISCARD_ALL;
2463
    if (!video_disable)
2464
        st_index[AVMEDIA_TYPE_VIDEO] =
2465
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2466
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2467
    if (!audio_disable)
2468
        st_index[AVMEDIA_TYPE_AUDIO] =
2469
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2470
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
2471
                                st_index[AVMEDIA_TYPE_VIDEO],
2472
                                NULL, 0);
2473
    if (!video_disable)
2474
        st_index[AVMEDIA_TYPE_SUBTITLE] =
2475
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2476
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2477
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2478
                                 st_index[AVMEDIA_TYPE_AUDIO] :
2479
                                 st_index[AVMEDIA_TYPE_VIDEO]),
2480
                                NULL, 0);
2481
    if (show_status) {
2482
        av_dump_format(ic, 0, is->filename, 0);
2483
    }
2484

    
2485
    /* open the streams */
2486
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2487
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2488
    }
2489

    
2490
    ret=-1;
2491
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2492
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2493
    }
2494
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2495
    if(ret<0) {
2496
        if (!display_disable)
2497
            is->show_audio = 2;
2498
    }
2499

    
2500
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2501
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2502
    }
2503

    
2504
    if (is->video_stream < 0 && is->audio_stream < 0) {
2505
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2506
        ret = -1;
2507
        goto fail;
2508
    }
2509

    
2510
    for(;;) {
2511
        if (is->abort_request)
2512
            break;
2513
        if (is->paused != is->last_paused) {
2514
            is->last_paused = is->paused;
2515
            if (is->paused)
2516
                is->read_pause_return= av_read_pause(ic);
2517
            else
2518
                av_read_play(ic);
2519
        }
2520
#if CONFIG_RTSP_DEMUXER
2521
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2522
            /* wait 10 ms to avoid trying to get another packet */
2523
            /* XXX: horrible */
2524
            SDL_Delay(10);
2525
            continue;
2526
        }
2527
#endif
2528
        if (is->seek_req) {
2529
            int64_t seek_target= is->seek_pos;
2530
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2531
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2532
//FIXME the +-2 is due to rounding not being done in the correct direction
//      when the seek_pos/seek_rel variables are generated
2534

    
2535
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2536
            if (ret < 0) {
2537
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2538
            }else{
2539
                if (is->audio_stream >= 0) {
2540
                    packet_queue_flush(&is->audioq);
2541
                    packet_queue_put(&is->audioq, &flush_pkt);
2542
                }
2543
                if (is->subtitle_stream >= 0) {
2544
                    packet_queue_flush(&is->subtitleq);
2545
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2546
                }
2547
                if (is->video_stream >= 0) {
2548
                    packet_queue_flush(&is->videoq);
2549
                    packet_queue_put(&is->videoq, &flush_pkt);
2550
                }
2551
            }
2552
            is->seek_req = 0;
2553
            eof= 0;
2554
        }
2555

    
2556
        /* if the queues are full, no need to read more */
2557
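        /* stop reading either when the combined queue size exceeds
           MAX_QUEUE_SIZE or when every open stream already has enough
           buffered packets */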
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2558
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2559
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2560
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2561
            /* wait 10 ms */
2562
            SDL_Delay(10);
2563
            continue;
2564
        }
2565
        if(eof) {
2566
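            /* at EOF, queue an empty packet so the video decoder flushes out
               its remaining delayed frames */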
            if(is->video_stream >= 0){
2567
                av_init_packet(pkt);
2568
                pkt->data=NULL;
2569
                pkt->size=0;
2570
                pkt->stream_index= is->video_stream;
2571
                packet_queue_put(&is->videoq, pkt);
2572
            }
2573
            SDL_Delay(10);
2574
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2575
                if(loop!=1 && (!loop || --loop)){
2576
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2577
                }else if(autoexit){
2578
                    ret=AVERROR_EOF;
2579
                    goto fail;
2580
                }
2581
            }
2582
            eof=0;
2583
            continue;
2584
        }
2585
        ret = av_read_frame(ic, pkt);
2586
        if (ret < 0) {
2587
            if (ret == AVERROR_EOF || url_feof(ic->pb))
2588
                eof=1;
2589
            if (ic->pb && ic->pb->error)
2590
                break;
2591
            SDL_Delay(100); /* wait for user event */
2592
            continue;
2593
        }
2594
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2595
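        /* a packet is in range when its timestamp, taken relative to the
           stream start time and the requested start time, does not exceed the
           requested duration (everything compared in seconds) */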
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2596
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2597
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2598
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2599
                <= ((double)duration/1000000);
2600
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2601
            packet_queue_put(&is->audioq, pkt);
2602
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2603
            packet_queue_put(&is->videoq, pkt);
2604
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2605
            packet_queue_put(&is->subtitleq, pkt);
2606
        } else {
2607
            av_free_packet(pkt);
2608
        }
2609
    }
2610
    /* wait until the end */
2611
    while (!is->abort_request) {
2612
        SDL_Delay(100);
2613
    }
2614

    
2615
    ret = 0;
2616
 fail:
2617
    /* disable interrupting */
2618
    global_video_state = NULL;
2619

    
2620
    /* close each stream */
2621
    if (is->audio_stream >= 0)
2622
        stream_component_close(is, is->audio_stream);
2623
    if (is->video_stream >= 0)
2624
        stream_component_close(is, is->video_stream);
2625
    if (is->subtitle_stream >= 0)
2626
        stream_component_close(is, is->subtitle_stream);
2627
    if (is->ic) {
2628
        av_close_input_file(is->ic);
2629
        is->ic = NULL; /* safety */
2630
    }
2631
    avio_set_interrupt_cb(NULL);
2632

    
2633
    if (ret != 0) {
2634
        SDL_Event event;
2635

    
2636
        event.type = FF_QUIT_EVENT;
2637
        event.user.data1 = is;
2638
        SDL_PushEvent(&event);
2639
    }
2640
    return 0;
2641
}
2642

    
2643
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2644
{
2645
    VideoState *is;
2646

    
2647
    is = av_mallocz(sizeof(VideoState));
2648
    if (!is)
2649
        return NULL;
2650
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2651
    is->iformat = iformat;
2652
    is->ytop = 0;
2653
    is->xleft = 0;
2654

    
2655
    /* start video display */
2656
    is->pictq_mutex = SDL_CreateMutex();
2657
    is->pictq_cond = SDL_CreateCond();
2658

    
2659
    is->subpq_mutex = SDL_CreateMutex();
2660
    is->subpq_cond = SDL_CreateCond();
2661

    
2662
    is->av_sync_type = av_sync_type;
2663
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2664
    if (!is->parse_tid) {
2665
        av_free(is);
2666
        return NULL;
2667
    }
2668
    return is;
2669
}
2670

    
2671
static void stream_cycle_channel(VideoState *is, int codec_type)
2672
{
2673
    AVFormatContext *ic = is->ic;
2674
    int start_index, stream_index;
2675
    AVStream *st;
2676

    
2677
    if (codec_type == AVMEDIA_TYPE_VIDEO)
2678
        start_index = is->video_stream;
2679
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
2680
        start_index = is->audio_stream;
2681
    else
2682
        start_index = is->subtitle_stream;
2683
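    /* subtitles may legitimately be disabled (index -1), in which case cycling
       starts from "no subtitle"; audio and video require an open stream */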
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2684
        return;
2685
    stream_index = start_index;
2686
    for(;;) {
2687
        if (++stream_index >= is->ic->nb_streams)
2688
        {
2689
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2690
            {
2691
                stream_index = -1;
2692
                goto the_end;
2693
            } else
2694
                stream_index = 0;
2695
        }
2696
        if (stream_index == start_index)
2697
            return;
2698
        st = ic->streams[stream_index];
2699
        if (st->codec->codec_type == codec_type) {
2700
            /* check that parameters are OK */
2701
            switch(codec_type) {
2702
            case AVMEDIA_TYPE_AUDIO:
2703
                if (st->codec->sample_rate != 0 &&
2704
                    st->codec->channels != 0)
2705
                    goto the_end;
2706
                break;
2707
            case AVMEDIA_TYPE_VIDEO:
2708
            case AVMEDIA_TYPE_SUBTITLE:
2709
                goto the_end;
2710
            default:
2711
                break;
2712
            }
2713
        }
2714
    }
2715
 the_end:
2716
    stream_component_close(is, start_index);
2717
    stream_component_open(is, stream_index);
2718
}
2719

    
2720

    
2721
static void toggle_full_screen(void)
2722
{
2723
    is_full_screen = !is_full_screen;
2724
    if (!fs_screen_width) {
2725
        /* use default SDL method */
2726
//        SDL_WM_ToggleFullScreen(screen);
2727
    }
2728
    video_open(cur_stream);
2729
}
2730

    
2731
static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}
2737

    
2738
static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}
2747

    
2748
static void toggle_audio_display(void)
2749
{
2750
    if (cur_stream) {
2751
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2752
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2753
        fill_rectangle(screen,
2754
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2755
                    bgcolor);
2756
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2757
    }
2758
}
2759

    
2760
/* handle an event sent by the GUI */
2761
static void event_loop(void)
2762
{
2763
    SDL_Event event;
2764
    double incr, pos, frac;
2765

    
2766
    for(;;) {
2767
        double x;
2768
        SDL_WaitEvent(&event);
2769
        switch(event.type) {
2770
        case SDL_KEYDOWN:
2771
            if (exit_on_keydown) {
2772
                do_exit();
2773
                break;
2774
            }
2775
            switch(event.key.keysym.sym) {
2776
            case SDLK_ESCAPE:
2777
            case SDLK_q:
2778
                do_exit();
2779
                break;
2780
            case SDLK_f:
2781
                toggle_full_screen();
2782
                break;
2783
            case SDLK_p:
2784
            case SDLK_SPACE:
2785
                toggle_pause();
2786
                break;
2787
            case SDLK_s: //S: Step to next frame
2788
                step_to_next_frame();
2789
                break;
2790
            case SDLK_a:
2791
                if (cur_stream)
2792
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2793
                break;
2794
            case SDLK_v:
2795
                if (cur_stream)
2796
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2797
                break;
2798
            case SDLK_t:
2799
                if (cur_stream)
2800
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2801
                break;
2802
            case SDLK_w:
2803
                toggle_audio_display();
2804
                break;
2805
            case SDLK_LEFT:
2806
                incr = -10.0;
2807
                goto do_seek;
2808
            case SDLK_RIGHT:
2809
                incr = 10.0;
2810
                goto do_seek;
2811
            case SDLK_UP:
2812
                incr = 60.0;
2813
                goto do_seek;
2814
            case SDLK_DOWN:
2815
                incr = -60.0;
2816
            do_seek:
2817
                if (cur_stream) {
2818
                    if (seek_by_bytes) {
2819
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2820
                            pos= cur_stream->video_current_pos;
2821
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2822
                            pos= cur_stream->audio_pkt.pos;
2823
                        }else
2824
                            pos = avio_tell(cur_stream->ic->pb);
2825
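                        /* convert the seek increment from seconds into a byte
                           offset using the average bitrate, or assume roughly
                           1.44 Mbit/s (180000 bytes/s) when it is unknown */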
                        if (cur_stream->ic->bit_rate)
2826
                            incr *= cur_stream->ic->bit_rate / 8.0;
2827
                        else
2828
                            incr *= 180000.0;
2829
                        pos += incr;
2830
                        stream_seek(cur_stream, pos, incr, 1);
2831
                    } else {
2832
                        pos = get_master_clock(cur_stream);
2833
                        pos += incr;
2834
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2835
                    }
2836
                }
2837
                break;
2838
            default:
2839
                break;
2840
            }
2841
            break;
2842
        case SDL_MOUSEBUTTONDOWN:
2843
            if (exit_on_mousedown) {
2844
                do_exit();
2845
                break;
2846
            }
2847
        case SDL_MOUSEMOTION:
2848
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2849
                x= event.button.x;
2850
            }else{
2851
                if(event.motion.state != SDL_PRESSED)
2852
                    break;
2853
                x= event.motion.x;
2854
            }
2855
            if (cur_stream) {
2856
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2857
                    uint64_t size=  avio_size(cur_stream->ic->pb);
2858
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2859
                }else{
2860
                    int64_t ts;
2861
                    int ns, hh, mm, ss;
2862
                    int tns, thh, tmm, tss;
2863
                    tns = cur_stream->ic->duration/1000000LL;
2864
                    thh = tns/3600;
2865
                    tmm = (tns%3600)/60;
2866
                    tss = (tns%60);
2867
                    frac = x/cur_stream->width;
2868
                    ns = frac*tns;
2869
                    hh = ns/3600;
2870
                    mm = (ns%3600)/60;
2871
                    ss = (ns%60);
2872
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2873
                            hh, mm, ss, thh, tmm, tss);
2874
                    ts = frac*cur_stream->ic->duration;
2875
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2876
                        ts += cur_stream->ic->start_time;
2877
                    stream_seek(cur_stream, ts, 0, 0);
2878
                }
2879
            }
2880
            break;
2881
        case SDL_VIDEORESIZE:
2882
            if (cur_stream) {
2883
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2884
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2885
                screen_width = cur_stream->width = event.resize.w;
2886
                screen_height= cur_stream->height= event.resize.h;
2887
            }
2888
            break;
2889
        case SDL_QUIT:
2890
        case FF_QUIT_EVENT:
2891
            do_exit();
2892
            break;
2893
        case FF_ALLOC_EVENT:
2894
            video_open(event.user.data1);
2895
            alloc_picture(event.user.data1);
2896
            break;
2897
        case FF_REFRESH_EVENT:
2898
            video_refresh_timer(event.user.data1);
2899
            cur_stream->refresh=0;
2900
            break;
2901
        default:
2902
            break;
2903
        }
2904
    }
2905
}
2906

    
2907
static void opt_frame_size(const char *arg)
2908
{
2909
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2910
        fprintf(stderr, "Incorrect frame size\n");
2911
        exit(1);
2912
    }
2913
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2914
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2915
        exit(1);
2916
    }
2917
}
2918

    
2919
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2924

    
2925
static int opt_height(const char *opt, const char *arg)
2926
{
2927
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2928
    return 0;
2929
}
2930

    
2931
static void opt_format(const char *arg)
2932
{
2933
    file_iformat = av_find_input_format(arg);
2934
    if (!file_iformat) {
2935
        fprintf(stderr, "Unknown input format: %s\n", arg);
2936
        exit(1);
2937
    }
2938
}
2939

    
2940
static void opt_frame_pix_fmt(const char *arg)
2941
{
2942
    frame_pix_fmt = av_get_pix_fmt(arg);
2943
}
2944

    
2945
static int opt_sync(const char *opt, const char *arg)
2946
{
2947
    if (!strcmp(arg, "audio"))
2948
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2949
    else if (!strcmp(arg, "video"))
2950
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2951
    else if (!strcmp(arg, "ext"))
2952
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2953
    else {
2954
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2955
        exit(1);
2956
    }
2957
    return 0;
2958
}
2959

    
2960
static int opt_seek(const char *opt, const char *arg)
2961
{
2962
    start_time = parse_time_or_die(opt, arg, 1);
2963
    return 0;
2964
}
2965

    
2966
static int opt_duration(const char *opt, const char *arg)
2967
{
2968
    duration = parse_time_or_die(opt, arg, 1);
2969
    return 0;
2970
}
2971

    
2972
static int opt_debug(const char *opt, const char *arg)
2973
{
2974
    av_log_set_level(99);
2975
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2976
    return 0;
2977
}
2978

    
2979
static int opt_vismv(const char *opt, const char *arg)
2980
{
2981
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2982
    return 0;
2983
}
2984

    
2985
static int opt_thread_count(const char *opt, const char *arg)
2986
{
2987
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2988
#if !HAVE_THREADS
2989
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2990
#endif
2991
    return 0;
2992
}
2993

    
2994
static const OptionDef options[] = {
2995
#include "cmdutils_common_opts.h"
2996
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2997
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2998
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2999
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3000
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3001
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3002
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3003
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3004
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3005
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3006
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3007
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3008
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3009
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3010
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3011
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3012
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3013
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3014
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3015
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3016
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3017
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3018
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3019
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3020
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3021
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3022
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3023
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3024
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3025
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3026
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3027
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3028
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3029
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3030
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3031
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3032
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3033
#if CONFIG_AVFILTER
3034
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3035
#endif
3036
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3037
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3038
    { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3039
    { NULL, },
3040
};
3041

    
3042
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

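    /* parse_options() (from cmdutils) walks the option table above; any
     * non-option argument is handed to opt_input_file(), which records
     * the single input filename and maps "-" to "pipe:". */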
    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
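    /* Initialize the SDL subsystems we rely on: video output, audio
     * output and timers. */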
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

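    /* These event types are never handled by the event loop, so tell SDL
     * to drop them instead of queuing them. */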
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

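    /* flush_pkt is a sentinel packet: elsewhere in this file it is queued
     * to tell the decoder threads to flush their codec buffers (e.g. after
     * a seek). Only its data pointer is compared; the payload is never
     * read. */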
    av_init_packet(&flush_pkt);
    flush_pkt.data = (uint8_t *)"FLUSH";

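    /* stream_open() launches the demuxing and decoding SDL threads in the
     * background; event_loop() below then drives display refresh and user
     * input on this (main) thread. */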
    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}