Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ f6d71b39

History | View | Annotate | Download (98.3 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavutil/imgutils.h"
32
#include "libavutil/parseutils.h"
33
#include "libavutil/samplefmt.h"
34
#include "libavutil/avassert.h"
35
#include "libavformat/avformat.h"
36
#include "libavdevice/avdevice.h"
37
#include "libswscale/swscale.h"
38
#include "libavcodec/audioconvert.h"
39
#include "libavcodec/opt.h"
40
#include "libavcodec/avfft.h"
41

    
42
#if CONFIG_AVFILTER
43
# include "libavfilter/avfilter.h"
44
# include "libavfilter/avfiltergraph.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG
63
//#define DEBUG_SYNC
64

    
65
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67
#define MIN_FRAMES 5
68

    
69
/* SDL audio buffer size, in samples. Should be small to have precise
70
   A/V sync as SDL does not have hardware buffer fullness info. */
71
#define SDL_AUDIO_BUFFER_SIZE 1024
72

    
73
/* no AV sync correction is done if below the AV sync threshold */
74
#define AV_SYNC_THRESHOLD 0.01
75
/* no AV correction is done if too big error */
76
#define AV_NOSYNC_THRESHOLD 10.0
77

    
78
#define FRAME_SKIP_FACTOR 0.05
79

    
80
/* maximum audio speed change to get correct sync */
81
#define SAMPLE_CORRECTION_PERCENT_MAX 10
82

    
83
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84
#define AUDIO_DIFF_AVG_NB   20
85

    
86
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
87
#define SAMPLE_ARRAY_SIZE (2*65536)
88

    
89
static int sws_flags = SWS_BICUBIC;
90

    
91
typedef struct PacketQueue {
92
    AVPacketList *first_pkt, *last_pkt;
93
    int nb_packets;
94
    int size;
95
    int abort_request;
96
    SDL_mutex *mutex;
97
    SDL_cond *cond;
98
} PacketQueue;
99

    
100
#define VIDEO_PICTURE_QUEUE_SIZE 2
101
#define SUBPICTURE_QUEUE_SIZE 4
102

    
103
typedef struct VideoPicture {
104
    double pts;                                  ///<presentation time stamp for this picture
105
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106
    int64_t pos;                                 ///<byte position in file
107
    SDL_Overlay *bmp;
108
    int width, height; /* source height & width */
109
    int allocated;
110
    enum PixelFormat pix_fmt;
111

    
112
#if CONFIG_AVFILTER
113
    AVFilterBufferRef *picref;
114
#endif
115
} VideoPicture;
116

    
117
typedef struct SubPicture {
118
    double pts; /* presentation time stamp for this picture */
119
    AVSubtitle sub;
120
} SubPicture;
121

    
122
enum {
123
    AV_SYNC_AUDIO_MASTER, /* default choice */
124
    AV_SYNC_VIDEO_MASTER,
125
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126
};
127

    
128
typedef struct VideoState {
129
    SDL_Thread *parse_tid;
130
    SDL_Thread *video_tid;
131
    SDL_Thread *refresh_tid;
132
    AVInputFormat *iformat;
133
    int no_background;
134
    int abort_request;
135
    int paused;
136
    int last_paused;
137
    int seek_req;
138
    int seek_flags;
139
    int64_t seek_pos;
140
    int64_t seek_rel;
141
    int read_pause_return;
142
    AVFormatContext *ic;
143
    int dtg_active_format;
144

    
145
    int audio_stream;
146

    
147
    int av_sync_type;
148
    double external_clock; /* external clock base */
149
    int64_t external_clock_time;
150

    
151
    double audio_clock;
152
    double audio_diff_cum; /* used for AV difference average computation */
153
    double audio_diff_avg_coef;
154
    double audio_diff_threshold;
155
    int audio_diff_avg_count;
156
    AVStream *audio_st;
157
    PacketQueue audioq;
158
    int audio_hw_buf_size;
159
    /* samples output by the codec. we reserve more space for avsync
160
       compensation */
161
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
163
    uint8_t *audio_buf;
164
    unsigned int audio_buf_size; /* in bytes */
165
    int audio_buf_index; /* in bytes */
166
    AVPacket audio_pkt_temp;
167
    AVPacket audio_pkt;
168
    enum AVSampleFormat audio_src_fmt;
169
    AVAudioConvert *reformat_ctx;
170

    
171
    int show_audio; /* if true, display audio samples */
172
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
173
    int sample_array_index;
174
    int last_i_start;
175
    RDFTContext *rdft;
176
    int rdft_bits;
177
    FFTSample *rdft_data;
178
    int xpos;
179

    
180
    SDL_Thread *subtitle_tid;
181
    int subtitle_stream;
182
    int subtitle_stream_changed;
183
    AVStream *subtitle_st;
184
    PacketQueue subtitleq;
185
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186
    int subpq_size, subpq_rindex, subpq_windex;
187
    SDL_mutex *subpq_mutex;
188
    SDL_cond *subpq_cond;
189

    
190
    double frame_timer;
191
    double frame_last_pts;
192
    double frame_last_delay;
193
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194
    int video_stream;
195
    AVStream *video_st;
196
    PacketQueue videoq;
197
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199
    int64_t video_current_pos;                   ///<current displayed file pos
200
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201
    int pictq_size, pictq_rindex, pictq_windex;
202
    SDL_mutex *pictq_mutex;
203
    SDL_cond *pictq_cond;
204
#if !CONFIG_AVFILTER
205
    struct SwsContext *img_convert_ctx;
206
#endif
207

    
208
    //    QETimer *video_timer;
209
    char filename[1024];
210
    int width, height, xleft, ytop;
211

    
212
#if CONFIG_AVFILTER
213
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214
#endif
215

    
216
    float skip_frames;
217
    float skip_frames_index;
218
    int refresh;
219
} VideoState;
220

    
221
static void show_help(void);
222
static int audio_write_get_buf_size(VideoState *is);
223

    
224
/* options specified by the user */
225
static AVInputFormat *file_iformat;
226
static const char *input_filename;
227
static const char *window_title;
228
static int fs_screen_width;
229
static int fs_screen_height;
230
static int screen_width = 0;
231
static int screen_height = 0;
232
static int frame_width = 0;
233
static int frame_height = 0;
234
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235
static int audio_disable;
236
static int video_disable;
237
static int wanted_stream[AVMEDIA_TYPE_NB]={
238
    [AVMEDIA_TYPE_AUDIO]=-1,
239
    [AVMEDIA_TYPE_VIDEO]=-1,
240
    [AVMEDIA_TYPE_SUBTITLE]=-1,
241
};
242
static int seek_by_bytes=-1;
243
static int display_disable;
244
static int show_status = 1;
245
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246
static int64_t start_time = AV_NOPTS_VALUE;
247
static int64_t duration = AV_NOPTS_VALUE;
248
static int debug = 0;
249
static int debug_mv = 0;
250
static int step = 0;
251
static int thread_count = 1;
252
static int workaround_bugs = 1;
253
static int fast = 0;
254
static int genpts = 0;
255
static int lowres = 0;
256
static int idct = FF_IDCT_AUTO;
257
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260
static int error_recognition = FF_ER_CAREFUL;
261
static int error_concealment = 3;
262
static int decoder_reorder_pts= -1;
263
static int autoexit;
264
static int exit_on_keydown;
265
static int exit_on_mousedown;
266
static int loop=1;
267
static int framedrop=1;
268

    
269
static int rdftspeed=20;
270
#if CONFIG_AVFILTER
271
static char *vfilters = NULL;
272
#endif
273

    
274
/* current context */
275
static int is_full_screen;
276
static VideoState *cur_stream;
277
static int64_t audio_callback_time;
278

    
279
static AVPacket flush_pkt;
280

    
281
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
282
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284

    
285
static SDL_Surface *screen;
286

    
287
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
/* Initialize a packet queue: zero all fields, create its lock and
   condition variable, and prime it with the global flush packet so the
   consumer starts from a known "flushed" state. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
297

    
298
/* Drop every queued packet, freeing both the packet payloads and the
   list nodes, and reset the queue counters. Holds the queue lock for
   the whole walk so producers/consumers see a consistent state. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
314

    
315
/* Tear down a packet queue: release all pending packets, then destroy
   the mutex and condition variable created in packet_queue_init(). */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
321

    
322
/* Append a packet to the tail of the queue and wake one waiting reader.
   Returns 0 on success, -1 on allocation or duplication failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* Make the packet own its data; the shared flush marker is exempt. */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;    /* queue was empty */
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
353

    
354
/* Mark the queue as aborting and wake any reader blocked in
   packet_queue_get() so it can observe the flag and bail out. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
/* Pop the head packet into *pkt.
   Returns < 0 if the queue was aborted, 0 if empty and non-blocking,
   > 0 when a packet was delivered. When 'block' is set, waits on the
   queue's condition variable until a packet arrives or abort is raised. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    int ret = 0;

    SDL_LockMutex(q->mutex);

    while (1) {
        AVPacketList *head;

        if (q->abort_request) {
            ret = -1;
            break;
        }

        head = q->first_pkt;
        if (head) {
            /* unlink the head node and update the accounting */
            q->first_pkt = head->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= head->pkt.size + sizeof(*head);
            *pkt = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }
        if (!block) {
            ret = 0;
            break;
        }
        SDL_CondWait(q->cond, q->mutex);
    }

    SDL_UnlockMutex(q->mutex);
    return ret;
}
400

    
401
/* Fill a solid rectangle of the given color on an SDL surface. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { .x = x, .y = y, .w = w, .h = h };

    SDL_FillRect(screen, &rect, color);
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
#define ALPHA_BLEND(a, oldp, newp, s)\
451
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
452

    
453
#define RGBA_IN(r, g, b, a, s)\
454
{\
455
    unsigned int v = ((const uint32_t *)(s))[0];\
456
    a = (v >> 24) & 0xff;\
457
    r = (v >> 16) & 0xff;\
458
    g = (v >> 8) & 0xff;\
459
    b = v & 0xff;\
460
}
461

    
462
#define YUVA_IN(y, u, v, a, s, pal)\
463
{\
464
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
465
    a = (val >> 24) & 0xff;\
466
    y = (val >> 16) & 0xff;\
467
    u = (val >> 8) & 0xff;\
468
    v = val & 0xff;\
469
}
470

    
471
#define YUVA_OUT(d, y, u, v, a)\
472
{\
473
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
474
}
475

    
476

    
477
#define BPP 1
478

    
479
/* Alpha-blend one paletted subtitle rectangle onto a YUV420 destination
   picture. The destination chroma planes are half resolution, so luma is
   blended per pixel while chroma samples are averaged over 1, 2 or 4
   source pixels depending on position parity (odd first row/column and
   odd trailing row/column are handled separately).
   FIX: the odd-height tail loop blended chroma with the single-sample
   values u/v instead of the accumulated two-sample sums u1/v1, although
   the shift factor assumed a two-sample sum — matching the equivalent
   loop in the odd-first-row section above. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle against the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* handle an odd first row: chroma is shared with the row above,
       so blend each chroma sample from a single luma row */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main loop: process two rows at a time; each chroma sample is
       averaged from up to 4 source pixels */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* use the accumulated two-sample sums (u1/v1), not the last
               single sample — shift 1 divides by two samples */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
678

    
679
/* Release the decoded subtitle data held by a SubPicture entry. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683

    
684
/* Display the picture at the read index of the picture queue:
   compute the display aspect ratio, blend any due subtitle rectangles
   into the YUV overlay, letterbox the image inside the window, and
   present it with SDL_DisplayYUVOverlay(). */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* pixel aspect comes from the filter graph output */
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's sample aspect ratio over the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        /* unknown/invalid SAR: assume square pixels */
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* NOTE(review): planes 1 and 2 are swapped between the
                       overlay and AVPicture — presumably SDL's YV12 (V before
                       U) vs FFmpeg's YUV420P ordering; confirm */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the image to the window, keeping aspect; & ~1 forces even
           dimensions (required for chroma subsampling) */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        /* center the image inside the window */
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
771

    
772
/* Mathematical modulo: unlike C's %, the result is always non-negative
   (in [0, b) for positive b), even when a is negative. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
780

    
781
/* Draw the audio visualization for stream s: either the raw waveform
   per channel (show_audio == 1) or a scrolling RDFT spectrogram.
   The display is centered on the samples currently being output,
   estimated from the audio buffer fullness and the time elapsed since
   the last audio callback.
   FIX: time_diff was declared int16_t; av_gettime() deltas are
   microseconds in int64_t and exceed 32767 (~33 ms) routinely, so the
   narrow type wrapped and corrupted the computed sample delay. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, for the RDFT size */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;   /* bytes per sample frame (16-bit samples) */
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* search backwards for a good trigger point (zero crossing
               with a large positive slope) to stabilize the waveform */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){   /* (b^c)<0: sign change between b and c */
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        /* paused: freeze on the last computed start index */
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: clear and draw one oscilloscope per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* blue separator lines between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrogram mode: draw one column per call, scrolling right */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window size changed: rebuild the RDFT context and buffer */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window to the samples */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                /* channel 0 drives red, channel 1 drives green */
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;   /* wrap the scrolling column */
    }
}
919

    
920
/* (Re)open the SDL video surface for the stream.
   Window size is chosen in priority order: forced fullscreen size,
   user-requested size, filter-graph output size (with avfilter) or
   codec frame size, then a 640x480 fallback. Returns 0 on success
   (including when the existing surface already matches), -1 on failure. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the target size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* record the size SDL actually gave us */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
969

    
970
/* display the current picture, if any */
971
static void video_display(VideoState *is)
972
{
973
    if(!screen)
974
        video_open(cur_stream);
975
    if (is->audio_st && is->show_audio)
976
        video_audio_display(is);
977
    else if (is->video_st)
978
        video_image_display(is);
979
}
980

    
981
static int refresh_thread(void *opaque)
982
{
983
    VideoState *is= opaque;
984
    while(!is->abort_request){
985
        SDL_Event event;
986
        event.type = FF_REFRESH_EVENT;
987
        event.user.data1 = opaque;
988
        if(!is->refresh){
989
            is->refresh=1;
990
            SDL_PushEvent(&event);
991
        }
992
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
993
    }
994
    return 0;
995
}
996

    
997
/* get the current audio clock value */
998
static double get_audio_clock(VideoState *is)
999
{
1000
    double pts;
1001
    int hw_buf_size, bytes_per_sec;
1002
    pts = is->audio_clock;
1003
    hw_buf_size = audio_write_get_buf_size(is);
1004
    bytes_per_sec = 0;
1005
    if (is->audio_st) {
1006
        bytes_per_sec = is->audio_st->codec->sample_rate *
1007
            2 * is->audio_st->codec->channels;
1008
    }
1009
    if (bytes_per_sec)
1010
        pts -= (double)hw_buf_size / bytes_per_sec;
1011
    return pts;
1012
}
1013

    
1014
/* get the current video clock value */
1015
static double get_video_clock(VideoState *is)
1016
{
1017
    if (is->paused) {
1018
        return is->video_current_pts;
1019
    } else {
1020
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1021
    }
1022
}
1023

    
1024
/* get the current external clock value */
1025
static double get_external_clock(VideoState *is)
1026
{
1027
    int64_t ti;
1028
    ti = av_gettime();
1029
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1030
}
1031

    
1032
/* get the current master clock value */
1033
static double get_master_clock(VideoState *is)
1034
{
1035
    double val;
1036

    
1037
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1038
        if (is->video_st)
1039
            val = get_video_clock(is);
1040
        else
1041
            val = get_audio_clock(is);
1042
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1043
        if (is->audio_st)
1044
            val = get_audio_clock(is);
1045
        else
1046
            val = get_video_clock(is);
1047
    } else {
1048
        val = get_external_clock(is);
1049
    }
1050
    return val;
1051
}
1052

    
1053
/* seek in the stream */
1054
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1055
{
1056
    if (!is->seek_req) {
1057
        is->seek_pos = pos;
1058
        is->seek_rel = rel;
1059
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1060
        if (seek_by_bytes)
1061
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1062
        is->seek_req = 1;
1063
    }
1064
}
1065

    
1066
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: now + drift - pts equals the wall time spent paused,
           so advance frame_timer by it instead of letting the display
           schedule try to "catch up" the paused interval */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* NOTE(review): read_pause_return is set elsewhere (presumably
               from av_read_pause()); when the demuxer supported pausing,
               re-derive the current pts from the stored drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the (possibly updated) current pts */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1078

    
1079
/* Compute the absolute display time (is->frame_timer) for the frame with
 * pts frame_current_pts, applying A/V sync corrections when video is not
 * the master clock.
 *
 * Side effects: updates is->frame_last_pts, is->frame_last_delay and
 * advances is->frame_timer.
 *
 * @param frame_current_pts pts (seconds) of the frame about to be queued
 * @param is                player state
 * @return the updated target display time, in seconds
 */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    /* diff initialized: it is only computed in the slave-sync branch but
       was previously read uninitialized by the debug printf */
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;          /* video lags: show ASAP */
            else if (diff >= sync_threshold)
                delay = 2 * delay;  /* video leads: hold the frame longer */
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fixed: previously printed an undeclared variable 'actual_delay',
       breaking compilation whenever DEBUG_SYNC was defined */
    printf("video: delay=%0.3f target=%0.3f pts=%0.3f A-V=%f\n",
            delay, is->frame_timer, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1119

    
1120
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet time to show this frame: keep it queued */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following frame is due, to decide on drops */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next frame's due
               time, raise the decoder-side skip ratio and drop this one */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole subtitle
                       queue and wake the subtitle thread */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subtitle once it has expired or
                           the next one is already due */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the status line to roughly every 30 ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1267

    
1268
/* Tear down a VideoState: stop the demux and refresh threads, release
 * every queued picture and the synchronisation primitives, then free the
 * state itself.  Must only be called once per stream. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    /* both threads poll abort_request and exit on their own */
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* without libavfilter we own a swscale context for YUV conversion */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1301

    
1302
/* Orderly shutdown: close the current stream, release global option and
 * filter state, quit SDL and exit the process.  Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n"); /* terminate the \r-based status line */
    SDL_Quit();
    /* silence the logger so nothing prints after SDL teardown */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1318

    
1319
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    /* the slot the video thread is currently waiting on (FF_ALLOC_EVENT) */
    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions/format come from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake the video thread blocked in queue_picture() */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1362

    
1363
/**
 * Convert a decoded frame into the next free SDL overlay slot and queue
 * it for display, blocking while the picture queue is full.  Overlay
 * (re)allocation is delegated to the main thread via FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if playback is being aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while no refresh is pending: display keeps up, so decay
       the decoder-side frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        /* take over the filter buffer reference carried in opaque */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores planes as Y, V, U: swap the chroma pointers */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        /* schedule the display time for this picture */
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1484

    
1485
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 * @return result of queue_picture(): 0 on success, -1 on abort
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no pts: extrapolate from the running video clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1514

    
1515
/* Pull the next packet from the video queue and decode it.
 * Returns 1 when a frame is ready in *frame (with *pts filled in),
 * 0 when no frame was produced (flush, decoder delay, or frame-skip),
 * -1 when the queue was aborted.  A flush packet resets decoder and
 * timing state. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if (pkt->data == flush_pkt.data) {
        /* seek happened: reset the decoder and drain the picture queue */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* restart frame timing from the current wall clock */
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* pick the timestamp source per the -drp option:
           -1 = decoder's best-effort guess, 1 = pts, 0 = dts */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* fractional frame skipping driven by the display thread */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1570

    
1571
#if CONFIG_AVFILTER
1572
/* Private state of ffplay's source filter ("ffplay_input"). */
typedef struct {
    VideoState *is;   /* owning player state, set in input_init() */
    AVFrame *frame;   /* scratch frame the decoder writes into */
    int use_dr1;      /* non-zero when direct-rendering callbacks are installed */
} FilterPriv;
1577

    
1578
/* get_buffer callback: back the decoder's frame with a buffer obtained
 * from the filter graph (direct rendering), so no copy is needed when
 * the frame is pushed downstream.  Returns 0 on success, -1 on error. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;

    if(av_image_check_size(w, h, 0, codec))
        return -1;

    /* pad dimensions for codec alignment plus an edge border on all sides */
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the buffer is padded; report the true picture size downstream */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* advance each plane pointer past the edge border */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* keep the reference reachable from the frame for later release */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1632

    
1633
/* release_buffer callback: drop our reference on the filter buffer that
 * backs this frame, after clearing the frame's plane pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(ref);
}
1638

    
1639
/* reget_buffer callback: reuse the existing filter-backed buffer for the
 * frame if it is still valid, or allocate a fresh readable one.  Fails
 * if the picture geometry/format changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        /* no buffer attached yet: get one that the decoder may read back */
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1659

    
1660
/* init callback for ffplay's source filter: bind the player state to the
 * filter, and when the decoder supports direct rendering (DR1) install
 * our buffer callbacks so decoded frames land directly in filter buffers.
 *
 * @param ctx    the filter instance (ctx->priv is a FilterPriv)
 * @param args   unused
 * @param opaque the VideoState; required
 * @return 0 on success, negative on error
 */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if((codec->codec->capabilities & CODEC_CAP_DR1)
    ) {
        codec->flags |= CODEC_FLAG_EMU_EDGE;
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    priv->frame = avcodec_alloc_frame();
    /* fixed: allocation failure was previously ignored, leaving a NULL
       frame to be dereferenced on the first decode */
    if (!priv->frame)
        return AVERROR(ENOMEM);

    return 0;
}
1683

    
1684
/* uninit callback: release the scratch frame allocated by input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    av_free(((FilterPriv *)ctx->priv)->frame);
}
1689

    
1690
/* request_frame callback: decode until a full frame is available, wrap
 * it in a buffer reference (reusing the decoder's buffer in DR1 mode,
 * copying otherwise) and push it down the output link.
 * Returns 0 on success, -1 on decode failure or abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* get_video_frame() returns 0 while a frame is not yet complete */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* the frame already lives in a filter buffer: just re-reference it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* frees pkt's payload only; pkt.pos below is still the stored field */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1722

    
1723
/* query_formats callback: the source filter supports exactly one pixel
 * format — whatever the video decoder produces. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1733

    
1734
/* config_props for the source filter's output link: propagate the
 * decoder's frame size and the stream time base downstream. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *fp = link->src->priv;
    AVStream   *st = fp->is->video_st;

    link->w         = st->codec->width;
    link->h         = st->codec->height;
    link->time_base = st->time_base;

    return 0;
}
1745

    
1746
/* Definition of ffplay's private source filter: no inputs, a single
 * video output pad whose request_frame pulls frames from the decoder. */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* source filter: empty input pad list */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1764

    
1765
/* Build the video filter graph: our source filter -> (optional -vf user
 * chain) -> ffsink.  On success is->out_video_filter points at the sink
 * from which decoded+filtered frames are pulled.
 *
 * @param graph    freshly allocated graph, owned by the caller
 * @param is       player state, passed as opaque to the source filter
 * @param vfilters user filter-chain description, may be NULL
 * @return 0 on success, a negative AVERROR code on failure
 */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    /* forward the global sws flags to any auto-inserted scale filters */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* fixed: av_malloc() results were dereferenced without a check */
        if (!outputs || !inputs) {
            av_freep(&outputs);
            av_freep(&inputs);
            ret = AVERROR(ENOMEM);
            goto the_end;
        }

        /* "in" labels the open output of our source, "out" the open
           input of the sink; the parser splices the user chain between */
        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        /* avfilter_graph_parse() takes ownership of inputs/outputs */
        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user chain: connect source directly to the sink */
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1810

    
1811
#endif  /* CONFIG_AVFILTER */
1812

    
1813
/* Video decoding thread: repeatedly obtains a decoded frame (through the
 * avfilter graph when CONFIG_AVFILTER, directly from the decoder otherwise),
 * rescales its PTS into the stream time base, and hands it to the picture
 * queue via output_picture2(). Exits when a frame fetch fails (<0). */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL;
    int64_t pos;

    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
        goto the_end;
    filt_out = is->out_video_filter;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        /* spin in 10ms steps while paused, but still react to shutdown */
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            /* stash the buffer ref so the display path can release it */
            frame->opaque = picref;
        }

        /* the filter graph may use a different time base than the stream;
           convert the PTS back so the rest of the player agrees */
        if (av_cmp_q(tb, is->video_st->time_base)) {
            av_unused int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_dlog(NULL, "video_thread(): "
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                    tb.num, tb.den, pts1,
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        /* ret < 0: abort/EOF from the fetch helper — leave the thread */
        if (ret < 0) goto the_end;

        /* ret == 0: no frame produced this iteration, try again */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* in frame-step mode, re-pause after emitting a single picture */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_free(frame);
    return 0;
}
1887

    
1888
/* Subtitle decoding thread: pulls packets from the subtitle queue, decodes
 * them, converts bitmap-subtitle palettes from RGBA to YUVA (format 0 means
 * bitmap subtitles here), and publishes the result into the subpicture
 * ring buffer guarded by subpq_mutex/subpq_cond. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;   /* len1: decoder return, currently unchecked (see commented code below) */
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* spin in 10ms steps while paused, but still react to shutdown */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* seek happened: the sentinel flush packet resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until there is a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette entries to YUVA in place,
               since the video overlay path works in YUV */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1962

    
1963
/* copy samples for viewing in editor window */
1964
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1965
{
1966
    int size, len, channels;
1967

    
1968
    channels = is->audio_st->codec->channels;
1969

    
1970
    size = samples_size / sizeof(short);
1971
    while (size > 0) {
1972
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1973
        if (len > size)
1974
            len = size;
1975
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1976
        samples += len;
1977
        is->sample_array_index += len;
1978
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1979
            is->sample_array_index = 0;
1980
        size -= len;
1981
    }
1982
}
1983

    
1984
/* return the new audio buffer size (samples can be added or deleted
1985
   to get better sync if video or external master clock) */
1986
static int synchronize_audio(VideoState *is, short *samples,
1987
                             int samples_size1, double pts)
1988
{
1989
    int n, samples_size;
1990
    double ref_clock;
1991

    
1992
    n = 2 * is->audio_st->codec->channels;
1993
    samples_size = samples_size1;
1994

    
1995
    /* if not master, then we try to remove or add samples to correct the clock */
1996
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1997
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1998
        double diff, avg_diff;
1999
        int wanted_size, min_size, max_size, nb_samples;
2000

    
2001
        ref_clock = get_master_clock(is);
2002
        diff = get_audio_clock(is) - ref_clock;
2003

    
2004
        if (diff < AV_NOSYNC_THRESHOLD) {
2005
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2006
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2007
                /* not enough measures to have a correct estimate */
2008
                is->audio_diff_avg_count++;
2009
            } else {
2010
                /* estimate the A-V difference */
2011
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2012

    
2013
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2014
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2015
                    nb_samples = samples_size / n;
2016

    
2017
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2018
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2019
                    if (wanted_size < min_size)
2020
                        wanted_size = min_size;
2021
                    else if (wanted_size > max_size)
2022
                        wanted_size = max_size;
2023

    
2024
                    /* add or remove samples to correction the synchro */
2025
                    if (wanted_size < samples_size) {
2026
                        /* remove samples */
2027
                        samples_size = wanted_size;
2028
                    } else if (wanted_size > samples_size) {
2029
                        uint8_t *samples_end, *q;
2030
                        int nb;
2031

    
2032
                        /* add samples */
2033
                        nb = (samples_size - wanted_size);
2034
                        samples_end = (uint8_t *)samples + samples_size - n;
2035
                        q = samples_end + n;
2036
                        while (nb > 0) {
2037
                            memcpy(q, samples_end, n);
2038
                            q += n;
2039
                            nb -= n;
2040
                        }
2041
                        samples_size = wanted_size;
2042
                    }
2043
                }
2044
#if 0
2045
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2046
                       diff, avg_diff, samples_size - samples_size1,
2047
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2048
#endif
2049
            }
2050
        } else {
2051
            /* too big difference : may be initial PTS errors, so
2052
               reset A-V filter */
2053
            is->audio_diff_avg_count = 0;
2054
            is->audio_diff_cum = 0;
2055
        }
2056
    }
2057

    
2058
    return samples_size;
2059
}
2060

    
2061
/* decode one audio frame and returns its uncompressed size */
/* Drains is->audio_pkt_temp first (one packet can hold several frames),
 * converting to S16 via is->reformat_ctx when the decoder's sample format
 * differs; refills from the audio packet queue when the packet is exhausted.
 * Stores the frame's presentation time in *pts_ptr and advances
 * is->audio_clock by the decoded duration. Returns the number of bytes
 * placed behind is->audio_buf, or -1 on pause/abort. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter when the decoder output format
               changed since the last frame */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 (decoder output) into audio_buf2 (S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* seek happened: the sentinel flush packet resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2163

    
2164
/* get the current audio output buffer size, in samples. With SDL, we
2165
   cannot have a precise information */
2166
static int audio_write_get_buf_size(VideoState *is)
2167
{
2168
    return is->audio_buf_size - is->audio_buf_index;
2169
}
2170

    
2171

    
2172
/* prepare a new audio buffer */
2173
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2174
{
2175
    VideoState *is = opaque;
2176
    int audio_size, len1;
2177
    double pts;
2178

    
2179
    audio_callback_time = av_gettime();
2180

    
2181
    while (len > 0) {
2182
        if (is->audio_buf_index >= is->audio_buf_size) {
2183
           audio_size = audio_decode_frame(is, &pts);
2184
           if (audio_size < 0) {
2185
                /* if error, just output silence */
2186
               is->audio_buf = is->audio_buf1;
2187
               is->audio_buf_size = 1024;
2188
               memset(is->audio_buf, 0, is->audio_buf_size);
2189
           } else {
2190
               if (is->show_audio)
2191
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2192
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2193
                                              pts);
2194
               is->audio_buf_size = audio_size;
2195
           }
2196
           is->audio_buf_index = 0;
2197
        }
2198
        len1 = is->audio_buf_size - is->audio_buf_index;
2199
        if (len1 > len)
2200
            len1 = len;
2201
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2202
        len -= len1;
2203
        stream += len1;
2204
        is->audio_buf_index += len1;
2205
    }
2206
}
2207

    
2208
/* open a given stream. Return 0 if OK */
/* Finds and opens the decoder for ic->streams[stream_index], applies the
 * global command-line decoding options, opens the SDL audio device for audio
 * streams, and starts the matching decode thread (video/subtitle) or unpauses
 * SDL audio. Returns -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most stereo */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* propagate the global command-line debugging/decoding switches */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        /* audio_decode_frame() converts everything to S16 */
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* stop discarding packets of this stream in the demuxer */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2307

    
2308
/* Tear down one open stream component: abort its packet queue, wake its
 * decode thread (or close SDL audio), join the thread, free the queue,
 * close the codec and mark the stream slot unused. The abort -> signal ->
 * join ordering matters: the signals unblock threads waiting on queue
 * conditions so the joins cannot deadlock. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback thread before freeing the queue */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to drop this stream's packets again */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2379

    
2380
/* since we have only one decoding thread, we can use a global
2381
   variable instead of a thread local variable */
2382
static VideoState *global_video_state;
2383

    
2384
static int decode_interrupt_cb(void)
2385
{
2386
    return (global_video_state && global_video_state->abort_request);
2387
}
2388

    
2389
/* this thread gets the stream from the disk or the network */
/* Demux thread: opens the input, selects the best audio/video/subtitle
 * streams, opens their components, then loops reading packets and routing
 * them to the per-stream queues, handling pause, seek requests, queue
 * back-pressure, EOF/looping, and the user-specified play range. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* make blocking I/O abortable via decode_interrupt_cb */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* demuxer hints from the command line (raw formats need these) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default: seek by bytes only for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything by default; stream_component_open() re-enables
       the streams we actually play */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): subtitle selection is gated on video_disable, not a
       subtitle-specific flag — looks like a copy-paste; confirm intent */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio visualization instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to network streams */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush every queue and push the flush sentinel so the
                   decoders reset their internal state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* feed an empty packet so the video decoder flushes its
               remaining delayed frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* all queues drained: loop again or exit as configured */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* tell the main event loop we died with an error */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2640

    
2641
/* Allocate and initialize a VideoState for the given input and spawn the
 * demuxer thread. Returns NULL on allocation or thread-creation failure.
 * Fix: on SDL_CreateThread() failure the original leaked the four SDL
 * synchronization primitives created just above; destroy them before
 * freeing the state. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* release the primitives created above instead of leaking them */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2668

    
2669
/* Cycle to the next usable stream of the given media type, wrapping around
 * the stream list. Subtitles may additionally cycle to "none" (-1); audio
 * streams are only accepted when sample_rate and channels are set. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    /* starting point: the currently active stream of this type */
    switch (codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        start_index = is->video_stream;
        break;
    case AVMEDIA_TYPE_AUDIO:
        start_index = is->audio_stream;
        break;
    default:
        start_index = is->subtitle_stream;
        break;
    }
    /* subtitles may start from "disabled" (-1); other types need a stream */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;

    stream_index = start_index;
    for (;;) {
        stream_index++;
        if (stream_index >= is->ic->nb_streams) {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
                /* wrapped past the end: turn subtitles off */
                stream_index = -1;
                goto the_end;
            }
            stream_index = 0;
        }
        if (stream_index == start_index)
            return; /* full cycle without finding an alternative */
        st = ic->streams[stream_index];
        if (st->codec->codec_type != codec_type)
            continue;
        /* check that parameters are OK */
        if (codec_type == AVMEDIA_TYPE_AUDIO) {
            if (st->codec->sample_rate != 0 && st->codec->channels != 0)
                goto the_end;
        } else if (codec_type == AVMEDIA_TYPE_VIDEO ||
                   codec_type == AVMEDIA_TYPE_SUBTITLE) {
            goto the_end;
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2717

    
2718

    
2719
static void toggle_full_screen(void)
2720
{
2721
    is_full_screen = !is_full_screen;
2722
    if (!fs_screen_width) {
2723
        /* use default SDL method */
2724
//        SDL_WM_ToggleFullScreen(screen);
2725
    }
2726
    video_open(cur_stream);
2727
}
2728

    
2729
static void toggle_pause(void)
2730
{
2731
    if (cur_stream)
2732
        stream_pause(cur_stream);
2733
    step = 0;
2734
}
2735

    
2736
static void step_to_next_frame(void)
2737
{
2738
    if (cur_stream) {
2739
        /* if the stream is paused unpause it, then step */
2740
        if (cur_stream->paused)
2741
            stream_pause(cur_stream);
2742
    }
2743
    step = 1;
2744
}
2745

    
2746
static void toggle_audio_display(void)
2747
{
2748
    if (cur_stream) {
2749
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2750
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2751
        fill_rectangle(screen,
2752
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2753
                    bgcolor);
2754
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2755
    }
2756
}
2757

    
2758
/* handle an event sent by the GUI */
2759
static void event_loop(void)
2760
{
2761
    SDL_Event event;
2762
    double incr, pos, frac;
2763

    
2764
    for(;;) {
2765
        double x;
2766
        SDL_WaitEvent(&event);
2767
        switch(event.type) {
2768
        case SDL_KEYDOWN:
2769
            if (exit_on_keydown) {
2770
                do_exit();
2771
                break;
2772
            }
2773
            switch(event.key.keysym.sym) {
2774
            case SDLK_ESCAPE:
2775
            case SDLK_q:
2776
                do_exit();
2777
                break;
2778
            case SDLK_f:
2779
                toggle_full_screen();
2780
                break;
2781
            case SDLK_p:
2782
            case SDLK_SPACE:
2783
                toggle_pause();
2784
                break;
2785
            case SDLK_s: //S: Step to next frame
2786
                step_to_next_frame();
2787
                break;
2788
            case SDLK_a:
2789
                if (cur_stream)
2790
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2791
                break;
2792
            case SDLK_v:
2793
                if (cur_stream)
2794
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2795
                break;
2796
            case SDLK_t:
2797
                if (cur_stream)
2798
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2799
                break;
2800
            case SDLK_w:
2801
                toggle_audio_display();
2802
                break;
2803
            case SDLK_LEFT:
2804
                incr = -10.0;
2805
                goto do_seek;
2806
            case SDLK_RIGHT:
2807
                incr = 10.0;
2808
                goto do_seek;
2809
            case SDLK_UP:
2810
                incr = 60.0;
2811
                goto do_seek;
2812
            case SDLK_DOWN:
2813
                incr = -60.0;
2814
            do_seek:
2815
                if (cur_stream) {
2816
                    if (seek_by_bytes) {
2817
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2818
                            pos= cur_stream->video_current_pos;
2819
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2820
                            pos= cur_stream->audio_pkt.pos;
2821
                        }else
2822
                            pos = avio_tell(cur_stream->ic->pb);
2823
                        if (cur_stream->ic->bit_rate)
2824
                            incr *= cur_stream->ic->bit_rate / 8.0;
2825
                        else
2826
                            incr *= 180000.0;
2827
                        pos += incr;
2828
                        stream_seek(cur_stream, pos, incr, 1);
2829
                    } else {
2830
                        pos = get_master_clock(cur_stream);
2831
                        pos += incr;
2832
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2833
                    }
2834
                }
2835
                break;
2836
            default:
2837
                break;
2838
            }
2839
            break;
2840
        case SDL_MOUSEBUTTONDOWN:
2841
            if (exit_on_mousedown) {
2842
                do_exit();
2843
                break;
2844
            }
2845
        case SDL_MOUSEMOTION:
2846
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2847
                x= event.button.x;
2848
            }else{
2849
                if(event.motion.state != SDL_PRESSED)
2850
                    break;
2851
                x= event.motion.x;
2852
            }
2853
            if (cur_stream) {
2854
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2855
                    uint64_t size=  avio_size(cur_stream->ic->pb);
2856
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2857
                }else{
2858
                    int64_t ts;
2859
                    int ns, hh, mm, ss;
2860
                    int tns, thh, tmm, tss;
2861
                    tns = cur_stream->ic->duration/1000000LL;
2862
                    thh = tns/3600;
2863
                    tmm = (tns%3600)/60;
2864
                    tss = (tns%60);
2865
                    frac = x/cur_stream->width;
2866
                    ns = frac*tns;
2867
                    hh = ns/3600;
2868
                    mm = (ns%3600)/60;
2869
                    ss = (ns%60);
2870
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2871
                            hh, mm, ss, thh, tmm, tss);
2872
                    ts = frac*cur_stream->ic->duration;
2873
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2874
                        ts += cur_stream->ic->start_time;
2875
                    stream_seek(cur_stream, ts, 0, 0);
2876
                }
2877
            }
2878
            break;
2879
        case SDL_VIDEORESIZE:
2880
            if (cur_stream) {
2881
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2882
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2883
                screen_width = cur_stream->width = event.resize.w;
2884
                screen_height= cur_stream->height= event.resize.h;
2885
            }
2886
            break;
2887
        case SDL_QUIT:
2888
        case FF_QUIT_EVENT:
2889
            do_exit();
2890
            break;
2891
        case FF_ALLOC_EVENT:
2892
            video_open(event.user.data1);
2893
            alloc_picture(event.user.data1);
2894
            break;
2895
        case FF_REFRESH_EVENT:
2896
            video_refresh_timer(event.user.data1);
2897
            cur_stream->refresh=0;
2898
            break;
2899
        default:
2900
            break;
2901
        }
2902
    }
2903
}
2904

    
2905
static void opt_frame_size(const char *arg)
2906
{
2907
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2908
        fprintf(stderr, "Incorrect frame size\n");
2909
        exit(1);
2910
    }
2911
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2912
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2913
        exit(1);
2914
    }
2915
}
2916

    
2917
static int opt_width(const char *opt, const char *arg)
2918
{
2919
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2920
    return 0;
2921
}
2922

    
2923
static int opt_height(const char *opt, const char *arg)
2924
{
2925
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2926
    return 0;
2927
}
2928

    
2929
static void opt_format(const char *arg)
2930
{
2931
    file_iformat = av_find_input_format(arg);
2932
    if (!file_iformat) {
2933
        fprintf(stderr, "Unknown input format: %s\n", arg);
2934
        exit(1);
2935
    }
2936
}
2937

    
2938
static void opt_frame_pix_fmt(const char *arg)
2939
{
2940
    frame_pix_fmt = av_get_pix_fmt(arg);
2941
}
2942

    
2943
static int opt_sync(const char *opt, const char *arg)
2944
{
2945
    if (!strcmp(arg, "audio"))
2946
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2947
    else if (!strcmp(arg, "video"))
2948
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2949
    else if (!strcmp(arg, "ext"))
2950
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2951
    else {
2952
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2953
        exit(1);
2954
    }
2955
    return 0;
2956
}
2957

    
2958
static int opt_seek(const char *opt, const char *arg)
2959
{
2960
    start_time = parse_time_or_die(opt, arg, 1);
2961
    return 0;
2962
}
2963

    
2964
static int opt_duration(const char *opt, const char *arg)
2965
{
2966
    duration = parse_time_or_die(opt, arg, 1);
2967
    return 0;
2968
}
2969

    
2970
static int opt_debug(const char *opt, const char *arg)
2971
{
2972
    av_log_set_level(99);
2973
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2974
    return 0;
2975
}
2976

    
2977
static int opt_vismv(const char *opt, const char *arg)
2978
{
2979
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2980
    return 0;
2981
}
2982

    
2983
static int opt_thread_count(const char *opt, const char *arg)
2984
{
2985
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2986
#if !HAVE_THREADS
2987
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2988
#endif
2989
    return 0;
2990
}
2991

    
2992
/* Command-line option table; terminated by an all-NULL sentinel entry.
 * OPT_FUNC2 entries call the named callback; OPT_BOOL/OPT_INT/OPT_STRING
 * entries write directly into the referenced global. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x",               HAS_ARG | OPT_FUNC2,             {(void*)opt_width},        "force displayed width", "width" },
    { "y",               HAS_ARG | OPT_FUNC2,             {(void*)opt_height},       "force displayed height", "height" },
    { "s",               HAS_ARG | OPT_VIDEO,             {(void*)opt_frame_size},   "set frame size (WxH or abbreviation)", "size" },
    { "fs",              OPT_BOOL,                        {(void*)&is_full_screen},  "force full screen" },
    { "an",              OPT_BOOL,                        {(void*)&audio_disable},   "disable audio" },
    { "vn",              OPT_BOOL,                        {(void*)&video_disable},   "disable video" },
    { "ast",             OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]},    "select desired audio stream", "stream_number" },
    { "vst",             OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]},    "select desired video stream", "stream_number" },
    { "sst",             OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss",              HAS_ARG | OPT_FUNC2,             {(void*)opt_seek},         "seek to a given position in seconds", "pos" },
    { "t",               HAS_ARG | OPT_FUNC2,             {(void*)opt_duration},     "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes",           OPT_INT | HAS_ARG,               {(void*)&seek_by_bytes},   "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp",          OPT_BOOL,                        {(void*)&display_disable}, "disable graphical display" },
    { "f",               HAS_ARG,                         {(void*)opt_format},       "force format", "fmt" },
    { "pix_fmt",         HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats",           OPT_BOOL | OPT_EXPERT,           {(void*)&show_status},     "show status", "" },
    { "debug",           HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug},       "print specific debug info", "" },
    { "bug",             OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv",           HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv},       "visualize motion vectors", "" },
    { "fast",            OPT_BOOL | OPT_EXPERT,           {(void*)&fast},            "non spec compliant optimizations", "" },
    { "genpts",          OPT_BOOL | OPT_EXPERT,           {(void*)&genpts},          "generate pts", "" },
    { "drp",             OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres",          OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&lowres},          "", "" },
    { "skiploop",        OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&skip_loop_filter}, "", "" },
    { "skipframe",       OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&skip_frame},      "", "" },
    { "skipidct",        OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&skip_idct},       "", "" },
    { "idct",            OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&idct},            "set idct algo",  "algo" },
    { "er",              OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec",              OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync",            HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync},        "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads",         HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit",        OPT_BOOL | OPT_EXPERT,           {(void*)&autoexit},        "exit at the end", "" },
    { "exitonkeydown",   OPT_BOOL | OPT_EXPERT,           {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT,           {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop",            OPT_INT | HAS_ARG | OPT_EXPERT,  {(void*)&loop},            "set number of times the playback shall be looped", "loop count" },
    { "framedrop",       OPT_BOOL | OPT_EXPERT,           {(void*)&framedrop},       "drop frames when cpu is too slow", "" },
    { "window_title",    OPT_STRING | HAS_ARG,            {(void*)&window_title},    "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf",              OPT_STRING | HAS_ARG,            {(void*)&vfilters},        "video filters", "filter list" },
#endif
    { "rdftspeed",       OPT_INT | HAS_ARG | OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default",         OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i",               OPT_DUMMY,                       {NULL},                    "ffmpeg compatibility dummy option", ""},
    { NULL, },
};
3039

    
3040
/* Print the one-line usage summary. */
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}
3046

    
3047
static void show_help(void)
3048
{
3049
    av_log_set_callback(log_callback_help);
3050
    show_usage();
3051
    show_help_options(options, "Main options:\n",
3052
                      OPT_EXPERT, 0);
3053
    show_help_options(options, "\nAdvanced options:\n",
3054
                      OPT_EXPERT, OPT_EXPERT);
3055
    printf("\n");
3056
    av_opt_show2(avcodec_opts[0], NULL,
3057
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3058
    printf("\n");
3059
    av_opt_show2(avformat_opts, NULL,
3060
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3061
#if !CONFIG_AVFILTER
3062
    printf("\n");
3063
    av_opt_show2(sws_opts, NULL,
3064
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
3065
#endif
3066
    printf("\nWhile playing:\n"
3067
           "q, ESC              quit\n"
3068
           "f                   toggle full screen\n"
3069
           "p, SPC              pause\n"
3070
           "a                   cycle audio channel\n"
3071
           "v                   cycle video channel\n"
3072
           "t                   cycle subtitle channel\n"
3073
           "w                   show audio waves\n"
3074
           "s                   activate frame-step mode\n"
3075
           "left/right          seek backward/forward 10 seconds\n"
3076
           "down/up             seek backward/forward 1 minute\n"
3077
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3078
           );
3079
}
3080

    
3081
static void opt_input_file(const char *filename)
3082
{
3083
    if (input_filename) {
3084
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3085
                filename, input_filename);
3086
        exit(1);
3087
    }
3088
    if (!strcmp(filename, "-"))
3089
        filename = "pipe:";
3090
    input_filename = filename;
3091
}
3092

    
3093
/* Called from the main */
3094
int main(int argc, char **argv)
3095
{
3096
    int flags;
3097

    
3098
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
3099

    
3100
    /* register all codecs, demux and protocols */
3101
    avcodec_register_all();
3102
#if CONFIG_AVDEVICE
3103
    avdevice_register_all();
3104
#endif
3105
#if CONFIG_AVFILTER
3106
    avfilter_register_all();
3107
#endif
3108
    av_register_all();
3109

    
3110
    init_opts();
3111

    
3112
    show_banner();
3113

    
3114
    parse_options(argc, argv, options, opt_input_file);
3115

    
3116
    if (!input_filename) {
3117
        show_usage();
3118
        fprintf(stderr, "An input file must be specified\n");
3119
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3120
        exit(1);
3121
    }
3122

    
3123
    if (display_disable) {
3124
        video_disable = 1;
3125
    }
3126
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3127
#if !defined(__MINGW32__) && !defined(__APPLE__)
3128
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3129
#endif
3130
    if (SDL_Init (flags)) {
3131
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3132
        exit(1);
3133
    }
3134

    
3135
    if (!display_disable) {
3136
#if HAVE_SDL_VIDEO_SIZE
3137
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3138
        fs_screen_width = vi->current_w;
3139
        fs_screen_height = vi->current_h;
3140
#endif
3141
    }
3142

    
3143
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3144
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3145
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3146

    
3147
    av_init_packet(&flush_pkt);
3148
    flush_pkt.data= "FLUSH";
3149

    
3150
    cur_stream = stream_open(input_filename, file_iformat);
3151

    
3152
    event_loop();
3153

    
3154
    /* never returns */
3155

    
3156
    return 0;
3157
}