Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 5d4890d7

History | View | Annotate | Download (99.3 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <inttypes.h>
24
#include <math.h>
25
#include <limits.h>
26
#include "libavutil/avstring.h"
27
#include "libavutil/colorspace.h"
28
#include "libavutil/pixdesc.h"
29
#include "libavcore/parseutils.h"
30
#include "libavformat/avformat.h"
31
#include "libavdevice/avdevice.h"
32
#include "libswscale/swscale.h"
33
#include "libavcodec/audioconvert.h"
34
#include "libavcodec/opt.h"
35
#include "libavcodec/avfft.h"
36

    
37
#if CONFIG_AVFILTER
38
# include "libavfilter/avfilter.h"
39
# include "libavfilter/avfiltergraph.h"
40
# include "libavfilter/graphparser.h"
41
#endif
42

    
43
#include "cmdutils.h"
44

    
45
#include <SDL.h>
46
#include <SDL_thread.h>
47

    
48
#ifdef __MINGW32__
49
#undef main /* We don't want SDL to override our main() */
50
#endif
51

    
52
#include <unistd.h>
53
#include <assert.h>
54

    
55
const char program_name[] = "FFplay";
56
const int program_birth_year = 2003;
57

    
58
//#define DEBUG_SYNC
59

    
60
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62
#define MIN_FRAMES 5
63

    
64
/* SDL audio buffer size, in samples. Should be small to have precise
65
   A/V sync as SDL does not have hardware buffer fullness info. */
66
#define SDL_AUDIO_BUFFER_SIZE 1024
67

    
68
/* no AV sync correction is done if below the AV sync threshold */
69
#define AV_SYNC_THRESHOLD 0.01
70
/* no AV correction is done if too big error */
71
#define AV_NOSYNC_THRESHOLD 10.0
72

    
73
#define FRAME_SKIP_FACTOR 0.05
74

    
75
/* maximum audio speed change to get correct sync */
76
#define SAMPLE_CORRECTION_PERCENT_MAX 10
77

    
78
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79
#define AUDIO_DIFF_AVG_NB   20
80

    
81
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
82
#define SAMPLE_ARRAY_SIZE (2*65536)
83

    
84
static int sws_flags = SWS_BICUBIC;
85

    
86
/* Thread-safe FIFO of demuxed packets, shared between threads via the
   SDL mutex/condition pair below. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head / tail */
    int nb_packets;       /* number of packets currently queued */
    int size;             /* payload bytes plus node overhead (see packet_queue_put) */
    int abort_request;    /* when set, packet_queue_get() returns -1 */
    SDL_mutex *mutex;     /* protects every field in this struct */
    SDL_cond *cond;       /* signalled on put and on abort */
} PacketQueue;
94

    
95
#define VIDEO_PICTURE_QUEUE_SIZE 2
96
#define SUBPICTURE_QUEUE_SIZE 4
97

    
98
/* One entry of the decoded-picture queue: a frame rendered into an SDL
   YUV overlay, plus the timing/position metadata needed to display it. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixels (NULL until allocated)
    int width, height; /* source height & width */
    int allocated;                               ///<nonzero once bmp is usable (set by the allocation path, outside this chunk)
    enum PixelFormat pix_fmt;                    ///<pixel format of the source frame

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtergraph buffer backing this picture
#endif
} VideoPicture;
111

    
112
/* A decoded subtitle together with its presentation time. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles; released with avsubtitle_free() */
} SubPicture;
116

    
117
/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
122

    
123
/* Aggregate state for one open media file: demuxer context, per-stream
   packet queues, the clocks used for A/V synchronisation, and display
   bookkeeping.  A single instance is shared by the parse, video,
   subtitle and refresh threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;      /* demux / read thread */
    SDL_Thread *video_tid;      /* video decode thread */
    SDL_Thread *refresh_tid;    /* display refresh thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* request all threads to exit */
    int paused;
    int last_paused;
    int seek_req;               /* a seek has been requested */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* audio stream index */

    int av_sync_type;           /* one of the AV_SYNC_* values above */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding and A/V-difference tracking --- */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;          /* currently active decoded-audio buffer */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* --- audio visualization (waveform / RDFT spectrogram) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples (see video_audio_display) */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current spectrogram column */

    /* --- subtitle stream and its picture queue --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video decoding, timing and the picture queue --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* current output window geometry */

    /* heuristics for detecting broken pts/dts in the stream */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
220

    
221
static void show_help(void);
222
static int audio_write_get_buf_size(VideoState *is);
223

    
224
/* options specified by the user */
225
static AVInputFormat *file_iformat;
226
static const char *input_filename;
227
static const char *window_title;
228
static int fs_screen_width;
229
static int fs_screen_height;
230
static int screen_width = 0;
231
static int screen_height = 0;
232
static int frame_width = 0;
233
static int frame_height = 0;
234
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235
static int audio_disable;
236
static int video_disable;
237
static int wanted_stream[AVMEDIA_TYPE_NB]={
238
    [AVMEDIA_TYPE_AUDIO]=-1,
239
    [AVMEDIA_TYPE_VIDEO]=-1,
240
    [AVMEDIA_TYPE_SUBTITLE]=-1,
241
};
242
static int seek_by_bytes=-1;
243
static int display_disable;
244
static int show_status = 1;
245
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246
static int64_t start_time = AV_NOPTS_VALUE;
247
static int64_t duration = AV_NOPTS_VALUE;
248
static int debug = 0;
249
static int debug_mv = 0;
250
static int step = 0;
251
static int thread_count = 1;
252
static int workaround_bugs = 1;
253
static int fast = 0;
254
static int genpts = 0;
255
static int lowres = 0;
256
static int idct = FF_IDCT_AUTO;
257
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260
static int error_recognition = FF_ER_CAREFUL;
261
static int error_concealment = 3;
262
static int decoder_reorder_pts= -1;
263
static int autoexit;
264
static int exit_on_keydown;
265
static int exit_on_mousedown;
266
static int loop=1;
267
static int framedrop=1;
268

    
269
static int rdftspeed=20;
270
#if CONFIG_AVFILTER
271
static char *vfilters = NULL;
272
#endif
273

    
274
/* current context */
275
static int is_full_screen;
276
static VideoState *cur_stream;
277
static int64_t audio_callback_time;
278

    
279
static AVPacket flush_pkt;
280

    
281
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
282
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284

    
285
static SDL_Surface *screen;
286

    
287
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
static void packet_queue_init(PacketQueue *q)
291
{
292
    memset(q, 0, sizeof(PacketQueue));
293
    q->mutex = SDL_CreateMutex();
294
    q->cond = SDL_CreateCond();
295
    packet_queue_put(q, &flush_pkt);
296
}
297

    
298
/* Drop every queued packet and reset the counters to an empty state.
   Takes the queue lock, so it is safe against concurrent put/get. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
314

    
315
/* Tear a queue down for good: flush all remaining packets, then destroy
   the SDL mutex and condition.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
321

    
322
/* Append a packet to the queue and wake one waiting consumer.
   Returns 0 on success, -1 on allocation or duplication failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* Take ownership of the packet data; the flush packet is a sentinel
       and must not be duplicated. */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(*entry));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    /* link at the tail */
    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
353

    
354
/* Mark the queue as aborted and wake any reader blocked in
   packet_queue_get(); subsequent gets return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367
{
368
    AVPacketList *pkt1;
369
    int ret;
370

    
371
    SDL_LockMutex(q->mutex);
372

    
373
    for(;;) {
374
        if (q->abort_request) {
375
            ret = -1;
376
            break;
377
        }
378

    
379
        pkt1 = q->first_pkt;
380
        if (pkt1) {
381
            q->first_pkt = pkt1->next;
382
            if (!q->first_pkt)
383
                q->last_pkt = NULL;
384
            q->nb_packets--;
385
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
386
            *pkt = pkt1->pkt;
387
            av_free(pkt1);
388
            ret = 1;
389
            break;
390
        } else if (!block) {
391
            ret = 0;
392
            break;
393
        } else {
394
            SDL_CondWait(q->cond, q->mutex);
395
        }
396
    }
397
    SDL_UnlockMutex(q->mutex);
398
    return ret;
399
}
400

    
401
/* Paint a solid w x h rectangle at (x, y) on the given SDL surface. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area = { x, y, w, h };

    SDL_FillRect(screen, &area, color);
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
/* Blend 'newp' over 'oldp' with alpha a (0..255); 's' is the extra
   fixed-point shift carried by accumulated chroma sums (0, 1 or 2).
   NOTE: oldp/newp/s are not parenthesized in the expansion — pass only
   simple expressions, as the callers below do. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at 's' into separate 8-bit components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette index *(s) in 'pal' and unpack the AYUV components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components back into a 32-bit word at 'd'. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettized subtitle bitmap */
#define BPP 1
478

    
479
/* Alpha-blend a palettized subtitle rectangle (AYUV palette) onto a
 * YUV 4:2:0 destination picture.  Luma is blended per pixel; chroma is
 * blended from sums accumulated over 2x1 or 2x2 pixel groups (depending
 * on row/column parity), with ALPHA_BLEND's shift matching the number of
 * samples summed.  imgw/imgh clamp the rectangle to the destination.
 *
 * Fix: in the odd-height tail loop the chroma was blended from the
 * single-sample u/v instead of the accumulated two-sample sums u1/v1
 * (which were computed and then never used), while using shift 1 — this
 * halved the blended chroma for odd-height rectangles.  It now matches
 * the other three loops.
 */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clamp the subtitle rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma shared with the row above, blend 1:1 */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: pairs of rows, chroma averaged over 2x2 blocks */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* four samples summed -> shift 2 */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* FIX: blend the accumulated two-sample sums (was u/v with
               shift 1, leaving u1/v1 dead and halving the chroma) */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
678

    
679
/* Release the decoded subtitle owned by a SubPicture queue slot. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
683

    
684
/* Show the picture at the queue's read index: work out the display
   aspect ratio, blend any due subtitle into the overlay, letterbox the
   result inside the window, and hand it to SDL for display. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's aspect ratio over the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert the sample (pixel) aspect ratio to a display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                /* blend only once the subtitle's start time has been reached */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* pict's U/V map to bmp->pixels[2]/[1]: SDL's YV12
                       overlay stores the V plane before U */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected picture inside the window,
           keeping both dimensions even (& ~1) */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
805

    
806
/* Mathematical (always non-negative) modulo: maps a into [0, b). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;

    return r < 0 ? r + b : r;
}
814

    
815
/* Draw the audio visualization: per-channel oscilloscope waveforms when
 * show_audio == 1, otherwise one column of an RDFT spectrogram.  Reads
 * recent samples from the s->sample_array ring buffer, centered on what
 * the audio device is currently outputting.
 *
 * Fix: time_diff was declared int16_t, so the microsecond difference
 * av_gettime() - audio_callback_time wrapped whenever more than ~32 ms
 * had elapsed since the audio callback, corrupting the delay estimate.
 * It is now int64_t.
 */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff; /* microseconds since the last audio callback */
    int rdft_bits, nb_freq;

    /* smallest power of two with at least 2*height bins */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels; /* bytes per sample frame (int16 samples) */
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a rising zero crossing with a strong slope to get
               a stable trigger point for the waveform */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        /* keep the display frozen while paused */
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window size changed: rebuild the RDFT context and buffer */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window before the transform */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* advance the spectrogram column, wrapping at the right edge */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
953

    
954
/* Create (or re-create) the SDL output surface sized for the current
 * content.  Returns 0 on success, -1 if SDL could not set the video mode.
 * Must run on the main thread (SDL video calls are not thread-safe). */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    /* pick the window size: explicit user size first, then the stream's
       (or filter chain's) dimensions, then a 640x480 fallback */
    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the existing surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size SDL actually gave us (may differ from request) */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1003

    
1004
/* display the current picture, if any */
1005
static void video_display(VideoState *is)
1006
{
1007
    if(!screen)
1008
        video_open(cur_stream);
1009
    if (is->audio_st && is->show_audio)
1010
        video_audio_display(is);
1011
    else if (is->video_st)
1012
        video_image_display(is);
1013
}
1014

    
1015
static int refresh_thread(void *opaque)
1016
{
1017
    VideoState *is= opaque;
1018
    while(!is->abort_request){
1019
    SDL_Event event;
1020
    event.type = FF_REFRESH_EVENT;
1021
    event.user.data1 = opaque;
1022
        if(!is->refresh){
1023
            is->refresh=1;
1024
    SDL_PushEvent(&event);
1025
        }
1026
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1027
    }
1028
    return 0;
1029
}
1030

    
1031
/* get the current audio clock value */
1032
static double get_audio_clock(VideoState *is)
1033
{
1034
    double pts;
1035
    int hw_buf_size, bytes_per_sec;
1036
    pts = is->audio_clock;
1037
    hw_buf_size = audio_write_get_buf_size(is);
1038
    bytes_per_sec = 0;
1039
    if (is->audio_st) {
1040
        bytes_per_sec = is->audio_st->codec->sample_rate *
1041
            2 * is->audio_st->codec->channels;
1042
    }
1043
    if (bytes_per_sec)
1044
        pts -= (double)hw_buf_size / bytes_per_sec;
1045
    return pts;
1046
}
1047

    
1048
/* get the current video clock value */
1049
static double get_video_clock(VideoState *is)
1050
{
1051
    if (is->paused) {
1052
        return is->video_current_pts;
1053
    } else {
1054
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055
    }
1056
}
1057

    
1058
/* get the current external clock value */
1059
static double get_external_clock(VideoState *is)
1060
{
1061
    int64_t ti;
1062
    ti = av_gettime();
1063
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064
}
1065

    
1066
/* get the current master clock value */
1067
static double get_master_clock(VideoState *is)
1068
{
1069
    double val;
1070

    
1071
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072
        if (is->video_st)
1073
            val = get_video_clock(is);
1074
        else
1075
            val = get_audio_clock(is);
1076
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077
        if (is->audio_st)
1078
            val = get_audio_clock(is);
1079
        else
1080
            val = get_video_clock(is);
1081
    } else {
1082
        val = get_external_clock(is);
1083
    }
1084
    return val;
1085
}
1086

    
1087
/* seek in the stream */
1088
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089
{
1090
    if (!is->seek_req) {
1091
        is->seek_pos = pos;
1092
        is->seek_rel = rel;
1093
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094
        if (seek_by_bytes)
1095
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1096
        is->seek_req = 1;
1097
    }
1098
}
1099

    
1100
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the time we spent paused so the
           display schedule continues from "now" instead of bursting */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer supports read_pause: the stream clock kept running,
               so re-derive the current pts from the stored drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift to the current wall clock */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1112

    
1113
/* Compute the wall-clock time at which the frame with the given pts should
 * be displayed, updating is->frame_timer.  When video is slave to the
 * audio/external clock, the nominal inter-frame delay is shrunk to 0 or
 * doubled to correct drifts larger than the sync threshold.
 * Returns the target display time (seconds, av_gettime() timebase). */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    /* diff is initialized: the DEBUG_SYNC printf below reads it even when
       the sync-correction branch was not taken */
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fixed: the old printf referenced 'actual_delay', which does not exist
       in this function and broke the build whenever DEBUG_SYNC was defined */
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1153

    
1154
/* called to display each frame: pops due pictures from the picture queue,
 * drops late frames (when framedrop is enabled), retires expired subtitles,
 * renders via video_display(), and periodically prints the status line.
 * Runs on the main/event thread in response to FF_REFRESH_EVENT. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and try again on the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* deadline of the frame after this one, used for late-frame drops */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* we are late: raise the decoder-side skip ratio */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    /* drop this frame and re-evaluate the next one */
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* retire the head subtitle once its display window has
                           ended, or once the next subtitle has become current */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        /* status line, printed at most every 30ms */
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1292

    
1293
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Handler for FF_ALLOC_EVENT: (re)creates the YUV overlay for the current
 * write slot of the picture queue, then signals the decoder thread waiting
 * in queue_picture().  A NULL overlay from SDL is tolerated here because
 * queue_picture() only proceeds when vp->bmp is non-NULL. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* with filtering enabled the display size comes from the filter output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake up queue_picture(), which waits on vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1328

    
1329
/**
 * Append a decoded frame to the picture queue, converting it into the SDL
 * YUV overlay of the next write slot.  Blocks while the queue is full and
 * while the main thread performs the (re)allocation of the overlay.
 * Returns 0 on success, -1 if playback is being aborted.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while no refresh pending: decay the skip-frames ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores planes as Y,V,U while AVPicture is Y,U,V:
           swap planes 1 and 2 */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* no filter chain: convert via swscale into the overlay */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        /* schedule the display time under the lock */
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1450

    
1451
/**
1452
 * compute the exact PTS for the picture if it is omitted in the stream
1453
 * @param pts1 the dts of the pkt / pts of the frame
1454
 */
1455
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1456
{
1457
    double frame_delay, pts;
1458

    
1459
    pts = pts1;
1460

    
1461
    if (pts != 0) {
1462
        /* update video clock with pts, if present */
1463
        is->video_clock = pts;
1464
    } else {
1465
        pts = is->video_clock;
1466
    }
1467
    /* update video clock for next frame */
1468
    frame_delay = av_q2d(is->video_st->codec->time_base);
1469
    /* for MPEG2, the frame can be repeated, so we update the
1470
       clock accordingly */
1471
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1472
    is->video_clock += frame_delay;
1473

    
1474
#if defined(DEBUG_SYNC) && 0
1475
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1476
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1477
#endif
1478
    return queue_picture(is, src_frame, pts, pos);
1479
}
1480

    
1481
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a frame to display was produced (frame/*pts filled in),
 * 0 when no displayable frame resulted (flush packet, incomplete frame,
 * or frame dropped by the skip_frames mechanism), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* the sentinel flush packet marks a seek: reset decoder and timing state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait for the refresh thread to drain the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* restart pts/dts monotonicity tracking and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotonic dts/pts occurrences to decide which
           timestamp source to trust below */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* choose the pts source: the reordered pts when forced or when it
           looks more reliable than dts, otherwise the packet dts, else 0 */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame skipping: only report every skip_frames-th decoded frame */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1552

    
1553
#if CONFIG_AVFILTER
/* Private state of the ffplay source filter: the player state, a scratch
 * frame for decoding, and whether direct rendering (DR1) is in use. */
typedef struct {
    VideoState *is;     /* owning player state */
    AVFrame *frame;     /* reusable frame filled by get_video_frame() */
    int use_dr1;        /* non-zero when the codec supports CODEC_CAP_DR1 */
} FilterPriv;
1559

    
1560
/* get_buffer callback installed on the decoder when DR1 is available:
 * the decoder renders directly into a buffer obtained from the filter
 * chain, avoiding a copy.  Returns 0 on success, -1 on failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer large enough for alignment plus the edge border
       the decoder needs around the visible picture */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are subsampled: shift the edge offsets */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip past the edge border so data[] points at the visible area */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1603

    
1604
/* release_buffer callback for DR1: drop our reference on the filter
 * buffer and clear the frame's data pointers so they cannot be reused. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(ref);
}
1609

    
1610
/* reget_buffer callback for DR1: re-validate an existing buffer, or fall
 * back to a fresh readable get_buffer() when the frame has no data yet.
 * Fails if the picture geometry or format changed under us. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (!pic->data[0]) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if (codec->width != ref->w || codec->height != ref->h ||
        codec->pix_fmt != ref->format) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1628

    
1629
/* init callback of the ffplay source filter: stores the player state,
 * installs the DR1 buffer callbacks on the decoder when supported, and
 * allocates the scratch decoding frame.  opaque must be the VideoState.
 * Returns 0 on success, -1 on error. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();
    /* fixed: allocation failure was previously ignored, leaving a NULL
       frame to be dereferenced later in input_request_frame() */
    if (!priv->frame)
        return -1;

    return 0;
}
1649

    
1650
/* uninit callback of the ffplay source filter: free the scratch frame
 * allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    av_free(((FilterPriv *)ctx->priv)->frame);
}
1655

    
1656
/* request_frame callback of the ffplay source filter: decodes packets
 * until a displayable frame is produced, wraps it in a buffer reference
 * (reusing the DR1 buffer when possible, copying otherwise) and pushes it
 * down the filter link.  Returns 0 on success, -1 on abort/error. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding (freeing consumed packets) until a frame comes out */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer: just take a reference */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->format, link->w, link->h);
    }

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    /* fixed: the packet must only be freed after its pos field has been
       consumed above; the old code freed it first and then read pkt.pos */
    av_free_packet(&pkt);

    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1687

    
1688
/* query_formats callback of the ffplay source filter: the only pixel
 * format we can produce is whatever the decoder outputs. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = { priv->is->video_st->codec->pix_fmt,
                                    PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1698

    
1699
/* config_props callback of the ffplay source filter's output pad:
 * propagate the decoder's picture dimensions onto the link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    const AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    return 0;
}
1709

    
1710
/* Source filter feeding decoded ffplay frames into the filter graph:
 * no input pads, one video output pad driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1728

    
1729
/* end_frame callback of the ffplay sink: intentionally empty — frames are
 * pulled out of the graph via get_filtered_video_frame() instead of being
 * consumed here. */
static void output_end_frame(AVFilterLink *link)
{
}
1732

    
1733
/* query_formats callback of the ffplay sink: the display path only
 * accepts YUV420P (it is converted into an SDL YV12 overlay). */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
1740

    
1741
/* Pull one frame out of the filter graph through the sink's input link.
 * On success the frame borrows the buffer reference (stored in
 * frame->opaque for later release) and 1 is returned; -1 on failure. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterLink *in = ctx->inputs[0];
    AVFilterBufferRef *pic;

    if (avfilter_request_frame(in))
        return -1;
    pic = in->cur_buf;
    if (!pic)
        return -1;
    in->cur_buf = NULL;

    frame->opaque = pic;
    *pts = pic->pts;
    *pos = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1761

    
1762
/* Sink filter terminating the graph: one readable video input pad whose
 * end_frame does nothing (frames are fetched with
 * get_filtered_video_frame()), and no output pads. */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1775
#endif  /* CONFIG_AVFILTER */
1776

    
1777
/* Video decoding thread: (with CONFIG_AVFILTER) builds the source ->
 * [user filters] -> sink graph, then loops pulling frames — from the graph
 * or directly from the decoder — converting their pts to seconds and
 * queuing them for display via output_picture2().  Returns 0 on exit. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* pass the global swscale flags to any scalers inserted by the graph */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user supplied a -vf chain: parse it between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect the source straight to the sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this iteration (flush/skip) */
        if (!ret)
            continue;

        /* convert the stream-timebase pts to seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1870

    
1871
/* Subtitle decoder thread.
 *
 * Pulls packets from is->subtitleq, decodes them with
 * avcodec_decode_subtitle2() and stores finished bitmap subtitles
 * (sub.format == 0) into the subpq ring buffer for the video refresh
 * code to blend.  'arg' is the VideoState.  Always returns 0; exits
 * when the subtitle queue is aborted. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms granularity) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets the decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* block until there is a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* only bitmap subtitles (format 0) are displayed */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's RGBA palette to YUVA in place, since
               the display path works in YUV overlay space */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1945

    
1946
/* copy samples for viewing in editor window */
1947
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1948
{
1949
    int size, len, channels;
1950

    
1951
    channels = is->audio_st->codec->channels;
1952

    
1953
    size = samples_size / sizeof(short);
1954
    while (size > 0) {
1955
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1956
        if (len > size)
1957
            len = size;
1958
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1959
        samples += len;
1960
        is->sample_array_index += len;
1961
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1962
            is->sample_array_index = 0;
1963
        size -= len;
1964
    }
1965
}
1966

    
1967
/* return the new audio buffer size (samples can be added or deleted
1968
   to get better sync if video or external master clock) */
1969
static int synchronize_audio(VideoState *is, short *samples,
1970
                             int samples_size1, double pts)
1971
{
1972
    int n, samples_size;
1973
    double ref_clock;
1974

    
1975
    n = 2 * is->audio_st->codec->channels;
1976
    samples_size = samples_size1;
1977

    
1978
    /* if not master, then we try to remove or add samples to correct the clock */
1979
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1980
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1981
        double diff, avg_diff;
1982
        int wanted_size, min_size, max_size, nb_samples;
1983

    
1984
        ref_clock = get_master_clock(is);
1985
        diff = get_audio_clock(is) - ref_clock;
1986

    
1987
        if (diff < AV_NOSYNC_THRESHOLD) {
1988
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1989
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1990
                /* not enough measures to have a correct estimate */
1991
                is->audio_diff_avg_count++;
1992
            } else {
1993
                /* estimate the A-V difference */
1994
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1995

    
1996
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1997
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1998
                    nb_samples = samples_size / n;
1999

    
2000
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2001
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2002
                    if (wanted_size < min_size)
2003
                        wanted_size = min_size;
2004
                    else if (wanted_size > max_size)
2005
                        wanted_size = max_size;
2006

    
2007
                    /* add or remove samples to correction the synchro */
2008
                    if (wanted_size < samples_size) {
2009
                        /* remove samples */
2010
                        samples_size = wanted_size;
2011
                    } else if (wanted_size > samples_size) {
2012
                        uint8_t *samples_end, *q;
2013
                        int nb;
2014

    
2015
                        /* add samples */
2016
                        nb = (samples_size - wanted_size);
2017
                        samples_end = (uint8_t *)samples + samples_size - n;
2018
                        q = samples_end + n;
2019
                        while (nb > 0) {
2020
                            memcpy(q, samples_end, n);
2021
                            q += n;
2022
                            nb -= n;
2023
                        }
2024
                        samples_size = wanted_size;
2025
                    }
2026
                }
2027
#if 0
2028
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2029
                       diff, avg_diff, samples_size - samples_size1,
2030
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2031
#endif
2032
            }
2033
        } else {
2034
            /* too big difference : may be initial PTS errors, so
2035
               reset A-V filter */
2036
            is->audio_diff_avg_count = 0;
2037
            is->audio_diff_cum = 0;
2038
        }
2039
    }
2040

    
2041
    return samples_size;
2042
}
2043

    
2044
/* decode one audio frame and returns its uncompressed size */
/* Decodes the next audio frame into is->audio_buf (converting to S16
 * via av_audio_convert if the decoder output format differs), advances
 * is->audio_clock, and stores the frame's presentation time in
 * *pts_ptr.  Returns the decoded size in bytes, or -1 when paused or
 * when the audio queue was aborted.  Runs on the SDL audio callback
 * thread via sdl_audio_callback(). */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;             /* packet owning the data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder's sample format
               changes; output side is always S16 for SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                /* convert audio_buf1 -> audio_buf2 (S16) */
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;   /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* flush packet (queued after a seek) resets the decoder */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2146

    
2147
/* get the current audio output buffer size, in samples. With SDL, we
2148
   cannot have a precise information */
2149
static int audio_write_get_buf_size(VideoState *is)
2150
{
2151
    return is->audio_buf_size - is->audio_buf_index;
2152
}
2153

    
2154

    
2155
/* prepare a new audio buffer */
/* SDL audio callback: fill 'stream' with exactly 'len' bytes.
 * 'opaque' is the VideoState.  Decodes frames on demand via
 * audio_decode_frame(), applies clock-based sample correction via
 * synchronize_audio(), and falls back to silence on decode errors so
 * the device keeps consuming data at a steady rate.  Runs on SDL's
 * audio thread. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp used elsewhere to estimate audio output latency */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill is->audio_buf when the previous chunk is exhausted */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               /* feed the visualization before sync may shrink/grow the data */
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits into the remaining output space */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2190

    
2191
/* open a given stream. Return 0 if OK */
2192
static int stream_component_open(VideoState *is, int stream_index)
2193
{
2194
    AVFormatContext *ic = is->ic;
2195
    AVCodecContext *avctx;
2196
    AVCodec *codec;
2197
    SDL_AudioSpec wanted_spec, spec;
2198

    
2199
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2200
        return -1;
2201
    avctx = ic->streams[stream_index]->codec;
2202

    
2203
    /* prepare audio output */
2204
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2205
        if (avctx->channels > 0) {
2206
            avctx->request_channels = FFMIN(2, avctx->channels);
2207
        } else {
2208
            avctx->request_channels = 2;
2209
        }
2210
    }
2211

    
2212
    codec = avcodec_find_decoder(avctx->codec_id);
2213
    avctx->debug_mv = debug_mv;
2214
    avctx->debug = debug;
2215
    avctx->workaround_bugs = workaround_bugs;
2216
    avctx->lowres = lowres;
2217
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2218
    avctx->idct_algo= idct;
2219
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2220
    avctx->skip_frame= skip_frame;
2221
    avctx->skip_idct= skip_idct;
2222
    avctx->skip_loop_filter= skip_loop_filter;
2223
    avctx->error_recognition= error_recognition;
2224
    avctx->error_concealment= error_concealment;
2225
    avcodec_thread_init(avctx, thread_count);
2226

    
2227
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2228

    
2229
    if (!codec ||
2230
        avcodec_open(avctx, codec) < 0)
2231
        return -1;
2232

    
2233
    /* prepare audio output */
2234
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2235
        wanted_spec.freq = avctx->sample_rate;
2236
        wanted_spec.format = AUDIO_S16SYS;
2237
        wanted_spec.channels = avctx->channels;
2238
        wanted_spec.silence = 0;
2239
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2240
        wanted_spec.callback = sdl_audio_callback;
2241
        wanted_spec.userdata = is;
2242
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2243
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2244
            return -1;
2245
        }
2246
        is->audio_hw_buf_size = spec.size;
2247
        is->audio_src_fmt= SAMPLE_FMT_S16;
2248
    }
2249

    
2250
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2251
    switch(avctx->codec_type) {
2252
    case AVMEDIA_TYPE_AUDIO:
2253
        is->audio_stream = stream_index;
2254
        is->audio_st = ic->streams[stream_index];
2255
        is->audio_buf_size = 0;
2256
        is->audio_buf_index = 0;
2257

    
2258
        /* init averaging filter */
2259
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2260
        is->audio_diff_avg_count = 0;
2261
        /* since we do not have a precise anough audio fifo fullness,
2262
           we correct audio sync only if larger than this threshold */
2263
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2264

    
2265
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2266
        packet_queue_init(&is->audioq);
2267
        SDL_PauseAudio(0);
2268
        break;
2269
    case AVMEDIA_TYPE_VIDEO:
2270
        is->video_stream = stream_index;
2271
        is->video_st = ic->streams[stream_index];
2272

    
2273
//        is->video_current_pts_time = av_gettime();
2274

    
2275
        packet_queue_init(&is->videoq);
2276
        is->video_tid = SDL_CreateThread(video_thread, is);
2277
        break;
2278
    case AVMEDIA_TYPE_SUBTITLE:
2279
        is->subtitle_stream = stream_index;
2280
        is->subtitle_st = ic->streams[stream_index];
2281
        packet_queue_init(&is->subtitleq);
2282

    
2283
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2284
        break;
2285
    default:
2286
        break;
2287
    }
2288
    return 0;
2289
}
2290

    
2291
/* Tear down one stream component opened by stream_component_open():
 * abort its packet queue, unblock and join its worker thread (or close
 * the SDL audio device), free per-component resources, and finally
 * close the codec context and clear the VideoState fields.
 * Out-of-range indices are ignored. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the callback thread before the queue is destroyed */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* discard further demuxed packets for this stream, then close */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2362

    
2363
/* since we have only one decoding thread, we can use a global
2364
   variable instead of a thread local variable */
2365
static VideoState *global_video_state;
2366

    
2367
static int decode_interrupt_cb(void)
2368
{
2369
    return (global_video_state && global_video_state->abort_request);
2370
}
2371

    
2372
/* this thread gets the stream from the disk or the network */
/* Demuxer thread.  Opens the input ('arg' is the VideoState), selects
 * and opens the best audio/video/subtitle streams, then loops reading
 * packets and distributing them to the per-stream packet queues while
 * honoring pause, seek requests, queue-size limits, looping, and EOF.
 * On exit it closes every open component and, on error, posts an
 * FF_QUIT_EVENT to the main loop.  Always returns 0. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];          /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};      /* per-type counter for -ast/-vst/-sst matching */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let blocking I/O be aborted when the user quits */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for raw demuxers that cannot detect these themselves */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* pick, per media type, the user-requested stream (wanted_stream)
       or else the one with the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: show the audio visualization instead */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* flush every queue and inject a flush packet so the
                   decoder threads reset their codec state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* queue an empty packet so the video decoder drains/flushes */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                /* loop == 0 means loop forever; otherwise decrement */
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2632

    
2633
/* Allocate a VideoState for 'filename', create the display
 * synchronization primitives and launch the demuxer thread.
 * Returns the new VideoState, or NULL on allocation/thread failure
 * (in which case everything allocated here is released again). */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* fix: previously only 'is' was freed here, leaking the four
           SDL mutexes/condition variables created above */
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2660

    
2661
/* Tear down a VideoState created by stream_open(): signal the worker
 * threads to stop, wait for them, release every queued picture and the
 * SDL synchronization primitives, then free the state itself. */
static void stream_close(VideoState *is)
{
    VideoPicture *slot;
    int idx;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* release every slot of the picture queue */
    for (idx = 0; idx < VIDEO_PICTURE_QUEUE_SIZE; idx++) {
        slot = &is->pictq[idx];
#if CONFIG_AVFILTER
        if (slot->picref) {
            avfilter_unref_buffer(slot->picref);
            slot->picref = NULL;
        }
#endif
        if (slot->bmp) {
            SDL_FreeYUVOverlay(slot->bmp);
            slot->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2694

    
2695
/* Switch the active stream of the given media type to the next usable
 * stream in the file, wrapping around.  For subtitles the search may end
 * at index -1, which means "subtitles off".  Closes the old stream
 * component and opens the newly selected one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    /* current stream index of the requested type is the search origin */
    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle: audio/video need an active stream (>= 0);
     * subtitles additionally allow the "off" state (-1) as origin */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* after the last subtitle stream, cycle to "off" */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0; /* wrap around for audio/video */
        }
        /* came all the way back to the origin: no alternative found */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio is only usable with known rate and channel count */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* close the old component before opening the chosen one */
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2743

    
2744

    
2745
static void toggle_full_screen(void)
2746
{
2747
    is_full_screen = !is_full_screen;
2748
    if (!fs_screen_width) {
2749
        /* use default SDL method */
2750
//        SDL_WM_ToggleFullScreen(screen);
2751
    }
2752
    video_open(cur_stream);
2753
}
2754

    
2755
static void toggle_pause(void)
2756
{
2757
    if (cur_stream)
2758
        stream_pause(cur_stream);
2759
    step = 0;
2760
}
2761

    
2762
static void step_to_next_frame(void)
2763
{
2764
    if (cur_stream) {
2765
        /* if the stream is paused unpause it, then step */
2766
        if (cur_stream->paused)
2767
            stream_pause(cur_stream);
2768
    }
2769
    step = 1;
2770
}
2771

    
2772
static void do_exit(void)
2773
{
2774
    int i;
2775
    if (cur_stream) {
2776
        stream_close(cur_stream);
2777
        cur_stream = NULL;
2778
    }
2779
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2780
        av_free(avcodec_opts[i]);
2781
    av_free(avformat_opts);
2782
    av_free(sws_opts);
2783
#if CONFIG_AVFILTER
2784
    avfilter_uninit();
2785
#endif
2786
    if (show_status)
2787
        printf("\n");
2788
    SDL_Quit();
2789
    exit(0);
2790
}
2791

    
2792
static void toggle_audio_display(void)
2793
{
2794
    if (cur_stream) {
2795
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2796
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2797
        fill_rectangle(screen,
2798
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2799
                    bgcolor);
2800
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2801
    }
2802
}
2803

    
2804
/* handle an event sent by the GUI */
2805
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize, quit and the
 * FF_* user events posted by the decoder/refresh threads.  Runs forever;
 * the process only leaves through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek, amount in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte-based seek: start from the best known byte
                         * position (video, then audio, then raw file pos) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the seconds increment into bytes using the
                         * stream bitrate, or a fallback rate if unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click is handled like a drag position below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only track motion while a button is held (drag-seek) */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the x coordinate to a fraction of the window width
                 * and seek to that fraction of the file */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)create the display surface
             * and the YUV overlay on the main thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2950

    
2951
static void opt_frame_size(const char *arg)
2952
{
2953
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2954
        fprintf(stderr, "Incorrect frame size\n");
2955
        exit(1);
2956
    }
2957
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2958
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2959
        exit(1);
2960
    }
2961
}
2962

    
2963
static int opt_width(const char *opt, const char *arg)
2964
{
2965
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2966
    return 0;
2967
}
2968

    
2969
static int opt_height(const char *opt, const char *arg)
2970
{
2971
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2972
    return 0;
2973
}
2974

    
2975
static void opt_format(const char *arg)
2976
{
2977
    file_iformat = av_find_input_format(arg);
2978
    if (!file_iformat) {
2979
        fprintf(stderr, "Unknown input format: %s\n", arg);
2980
        exit(1);
2981
    }
2982
}
2983

    
2984
static void opt_frame_pix_fmt(const char *arg)
2985
{
2986
    frame_pix_fmt = av_get_pix_fmt(arg);
2987
}
2988

    
2989
static int opt_sync(const char *opt, const char *arg)
2990
{
2991
    if (!strcmp(arg, "audio"))
2992
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2993
    else if (!strcmp(arg, "video"))
2994
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2995
    else if (!strcmp(arg, "ext"))
2996
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2997
    else {
2998
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2999
        exit(1);
3000
    }
3001
    return 0;
3002
}
3003

    
3004
static int opt_seek(const char *opt, const char *arg)
3005
{
3006
    start_time = parse_time_or_die(opt, arg, 1);
3007
    return 0;
3008
}
3009

    
3010
static int opt_duration(const char *opt, const char *arg)
3011
{
3012
    duration = parse_time_or_die(opt, arg, 1);
3013
    return 0;
3014
}
3015

    
3016
static int opt_debug(const char *opt, const char *arg)
3017
{
3018
    av_log_set_level(99);
3019
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3020
    return 0;
3021
}
3022

    
3023
static int opt_vismv(const char *opt, const char *arg)
3024
{
3025
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3026
    return 0;
3027
}
3028

    
3029
static int opt_thread_count(const char *opt, const char *arg)
3030
{
3031
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3032
#if !HAVE_THREADS
3033
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3034
#endif
3035
    return 0;
3036
}
3037

    
3038
/* Command-line option table, consumed by parse_options() (cmdutils).
 * Each entry: name, flags (OPT_* semantics from cmdutils.h), target
 * variable or handler function, help text, optional argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3084

    
3085
/* Print the one-line program synopsis to stdout. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3091

    
3092
static void show_help(void)
3093
{
3094
    show_usage();
3095
    show_help_options(options, "Main options:\n",
3096
                      OPT_EXPERT, 0);
3097
    show_help_options(options, "\nAdvanced options:\n",
3098
                      OPT_EXPERT, OPT_EXPERT);
3099
    printf("\nWhile playing:\n"
3100
           "q, ESC              quit\n"
3101
           "f                   toggle full screen\n"
3102
           "p, SPC              pause\n"
3103
           "a                   cycle audio channel\n"
3104
           "v                   cycle video channel\n"
3105
           "t                   cycle subtitle channel\n"
3106
           "w                   show audio waves\n"
3107
           "s                   activate frame-step mode\n"
3108
           "left/right          seek backward/forward 10 seconds\n"
3109
           "down/up             seek backward/forward 1 minute\n"
3110
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3111
           );
3112
}
3113

    
3114
static void opt_input_file(const char *filename)
3115
{
3116
    if (input_filename) {
3117
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3118
                filename, input_filename);
3119
        exit(1);
3120
    }
3121
    if (!strcmp(filename, "-"))
3122
        filename = "pipe:";
3123
    input_filename = filename;
3124
}
3125

    
3126
/* Called from the main */
3127
int main(int argc, char **argv)
3128
{
3129
    int flags, i;
3130

    
3131
    /* register all codecs, demux and protocols */
3132
    avcodec_register_all();
3133
#if CONFIG_AVDEVICE
3134
    avdevice_register_all();
3135
#endif
3136
#if CONFIG_AVFILTER
3137
    avfilter_register_all();
3138
#endif
3139
    av_register_all();
3140

    
3141
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
3142
        avcodec_opts[i]= avcodec_alloc_context2(i);
3143
    }
3144
    avformat_opts = avformat_alloc_context();
3145
#if !CONFIG_AVFILTER
3146
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3147
#endif
3148

    
3149
    show_banner();
3150

    
3151
    parse_options(argc, argv, options, opt_input_file);
3152

    
3153
    if (!input_filename) {
3154
        show_usage();
3155
        fprintf(stderr, "An input file must be specified\n");
3156
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3157
        exit(1);
3158
    }
3159

    
3160
    if (display_disable) {
3161
        video_disable = 1;
3162
    }
3163
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3164
#if !defined(__MINGW32__) && !defined(__APPLE__)
3165
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3166
#endif
3167
    if (SDL_Init (flags)) {
3168
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3169
        exit(1);
3170
    }
3171

    
3172
    if (!display_disable) {
3173
#if HAVE_SDL_VIDEO_SIZE
3174
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3175
        fs_screen_width = vi->current_w;
3176
        fs_screen_height = vi->current_h;
3177
#endif
3178
    }
3179

    
3180
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3181
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3182
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3183

    
3184
    av_init_packet(&flush_pkt);
3185
    flush_pkt.data= "FLUSH";
3186

    
3187
    cur_stream = stream_open(input_filename, file_iformat);
3188

    
3189
    event_loop();
3190

    
3191
    /* never returns */
3192

    
3193
    return 0;
3194
}