Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ cfb7e6e6

History | View | Annotate | Download (99 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <inttypes.h>
24
#include <math.h>
25
#include <limits.h>
26
#include "libavutil/avstring.h"
27
#include "libavutil/colorspace.h"
28
#include "libavutil/pixdesc.h"
29
#include "libavformat/avformat.h"
30
#include "libavdevice/avdevice.h"
31
#include "libswscale/swscale.h"
32
#include "libavcodec/audioconvert.h"
33
#include "libavcodec/opt.h"
34
#include "libavcodec/avfft.h"
35

    
36
#if CONFIG_AVFILTER
37
# include "libavfilter/avfilter.h"
38
# include "libavfilter/avfiltergraph.h"
39
# include "libavfilter/graphparser.h"
40
#endif
41

    
42
#include "cmdutils.h"
43

    
44
#include <SDL.h>
45
#include <SDL_thread.h>
46

    
47
#ifdef __MINGW32__
48
#undef main /* We don't want SDL to override our main() */
49
#endif
50

    
51
#include <unistd.h>
52
#include <assert.h>
53

    
54
const char program_name[] = "FFplay";
55
const int program_birth_year = 2003;
56

    
57
//#define DEBUG_SYNC
58

    
59
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61
#define MIN_FRAMES 5
62

    
63
/* SDL audio buffer size, in samples. Should be small to have precise
64
   A/V sync as SDL does not have hardware buffer fullness info. */
65
#define SDL_AUDIO_BUFFER_SIZE 1024
66

    
67
/* no AV sync correction is done if below the AV sync threshold */
68
#define AV_SYNC_THRESHOLD 0.01
69
/* no AV correction is done if too big error */
70
#define AV_NOSYNC_THRESHOLD 10.0
71

    
72
#define FRAME_SKIP_FACTOR 0.05
73

    
74
/* maximum audio speed change to get correct sync */
75
#define SAMPLE_CORRECTION_PERCENT_MAX 10
76

    
77
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78
#define AUDIO_DIFF_AVG_NB   20
79

    
80
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81
#define SAMPLE_ARRAY_SIZE (2*65536)
82

    
83
static int sws_flags = SWS_BICUBIC;
84

    
85
/* Thread-safe FIFO of demuxed packets, shared between the demuxer ("parse")
   thread and a decoder thread.  All fields are protected by `mutex`; `cond`
   is signalled on every put and on abort so blocked readers wake up. */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly-linked list head / tail */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* total bytes: payloads + list nodes */
    int abort_request;                  /* set by packet_queue_abort(); makes gets return -1 */
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
93

    
94
#define VIDEO_PICTURE_QUEUE_SIZE 2
95
#define SUBPICTURE_QUEUE_SIZE 4
96

    
97
/* One entry of the decoded-picture ring buffer: a decoded frame uploaded
   into an SDL YUV overlay, plus the timing data needed to display it. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL overlay holding the frame pixels */
    int width, height; /* source height & width */
    int allocated;                               /* nonzero once `bmp` has been created */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      /* filter-graph reference backing this picture */
#endif
} VideoPicture;
110

    
111
/* One decoded subtitle with its display timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
115

    
116
/* Which clock the other streams are slaved to (value of av_sync_type). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121

    
122
/* All per-file player state: demuxer, per-stream decoders and queues,
   clocks for A/V sync, and display geometry.  One instance per open file. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* display refresh thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;        /* tells all threads to quit */
    int paused;
    int last_paused;          /* previous `paused` value, to detect transitions */
    int seek_req;             /* seek requested; parameters below */
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;    /* result of av_read_pause(), replayed on resume */
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;         /* index of the audio stream, or -1 */

    int av_sync_type;         /* one of AV_SYNC_* */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* --- audio decoding / A-V sync state --- */
    double audio_clock;       /* pts of the last decoded audio sample */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;            /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;  /* sample-format converter, if needed */

    /* --- audio visualization (waves / RDFT spectrum) --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                      /* current column of the scrolling spectrum */

    /* --- subtitles --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video decoding / display --- */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;   /* display window geometry */

    /* timestamp fault detection: counters of out-of-order pts/dts */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    /* frame dropping state (see FRAME_SKIP_FACTOR) */
    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
219

    
220
static void show_help(void);
221
static int audio_write_get_buf_size(VideoState *is);
222

    
223
/* options specified by the user */
224
static AVInputFormat *file_iformat;
225
static const char *input_filename;
226
static const char *window_title;
227
static int fs_screen_width;
228
static int fs_screen_height;
229
static int screen_width = 0;
230
static int screen_height = 0;
231
static int frame_width = 0;
232
static int frame_height = 0;
233
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234
static int audio_disable;
235
static int video_disable;
236
static int wanted_stream[AVMEDIA_TYPE_NB]={
237
    [AVMEDIA_TYPE_AUDIO]=-1,
238
    [AVMEDIA_TYPE_VIDEO]=-1,
239
    [AVMEDIA_TYPE_SUBTITLE]=-1,
240
};
241
static int seek_by_bytes=-1;
242
static int display_disable;
243
static int show_status = 1;
244
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245
static int64_t start_time = AV_NOPTS_VALUE;
246
static int64_t duration = AV_NOPTS_VALUE;
247
static int debug = 0;
248
static int debug_mv = 0;
249
static int step = 0;
250
static int thread_count = 1;
251
static int workaround_bugs = 1;
252
static int fast = 0;
253
static int genpts = 0;
254
static int lowres = 0;
255
static int idct = FF_IDCT_AUTO;
256
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259
static int error_recognition = FF_ER_CAREFUL;
260
static int error_concealment = 3;
261
static int decoder_reorder_pts= -1;
262
static int autoexit;
263
static int loop=1;
264
static int framedrop=1;
265

    
266
static int rdftspeed=20;
267
#if CONFIG_AVFILTER
268
static char *vfilters = NULL;
269
#endif
270

    
271
/* current context */
272
static int is_full_screen;
273
static VideoState *cur_stream;
274
static int64_t audio_callback_time;
275

    
276
static AVPacket flush_pkt;
277

    
278
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
279
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
280
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
281

    
282
static SDL_Surface *screen;
283

    
284
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
285

    
286
/* packet queue handling */
287
/* Initialize a packet queue: zero all fields, create its lock and condition
   variable, and prime it with the global flush packet so the first get()
   triggers a decoder flush. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
294

    
295
/* Drop every queued packet, freeing both the packet payloads and the list
   nodes, and reset the counters.  Takes the queue lock for the duration. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
311

    
312
/* Destroy a packet queue: flush remaining packets first, then release the
   SDL mutex and condition variable.  The queue must no longer be in use. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
318

    
319
/* Append a packet to the queue and wake one waiting reader.
   The packet is duplicated first so the queue owns its data (the global
   flush packet is a sentinel and is enqueued as-is).
   Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;   /* queue was empty */
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
350

    
351
/* Mark the queue as aborted and wake any reader blocked in
   packet_queue_get(), which will then return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
361

    
362
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363
/* Pop the oldest packet into *pkt.  If `block` is nonzero, wait on the
   queue's condition variable until a packet arrives or the queue is aborted.
   return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *head;
    int ret;

    SDL_LockMutex(q->mutex);

    while (1) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        head = q->first_pkt;
        if (head) {
            /* unlink the head node and update the byte/packet accounting */
            q->first_pkt = head->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= head->pkt.size + sizeof(*head);
            *pkt = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }
        if (!block) {
            ret = 0;
            break;
        }
        /* empty and blocking: sleep until put() or abort() signals */
        SDL_CondWait(q->cond, q->mutex);
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
397

    
398
/* Fill an axis-aligned rectangle of the surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area = { .x = x, .y = y, .w = w, .h = h };
    SDL_FillRect(screen, &area, color);
}
408

    
409
#if 0
410
/* draw only the border of a rectangle */
411
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
412
{
413
    int w1, w2, h1, h2;
414

415
    /* fill the background */
416
    w1 = x;
417
    if (w1 < 0)
418
        w1 = 0;
419
    w2 = s->width - (x + w);
420
    if (w2 < 0)
421
        w2 = 0;
422
    h1 = y;
423
    if (h1 < 0)
424
        h1 = 0;
425
    h2 = s->height - (y + h);
426
    if (h2 < 0)
427
        h2 = 0;
428
    fill_rectangle(screen,
429
                   s->xleft, s->ytop,
430
                   w1, s->height,
431
                   color);
432
    fill_rectangle(screen,
433
                   s->xleft + s->width - w2, s->ytop,
434
                   w2, s->height,
435
                   color);
436
    fill_rectangle(screen,
437
                   s->xleft + w1, s->ytop,
438
                   s->width - w1 - w2, h1,
439
                   color);
440
    fill_rectangle(screen,
441
                   s->xleft + w1, s->ytop + s->height - h2,
442
                   s->width - w1 - w2, h2,
443
                   color);
444
}
445
#endif
446

    
447
/* Blend newp over oldp with alpha `a` (0..255); `s` is an extra shift so
   that sums of 2 or 4 chroma samples can be averaged in the same step. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB value from memory into separate components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at `s` and unpack it as
   AYUV components (the subtitle palette is pre-converted to YCrCb). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack AYUV components into a 32-bit word at `d`.  (Currently unused.) */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel: subtitle bitmaps are 8-bit palette indices */
#define BPP 1

    
476
/* Alpha-blend one palettized subtitle rectangle onto a YUV420P destination
   picture of size imgw x imgh.  The rectangle is clipped to the image.
   Luma is blended per pixel; chroma is blended per 2x2 block, averaging the
   2 or 4 covered samples (the shift argument of ALPHA_BLEND divides the
   accumulated sums).  Odd left/top/right/bottom edges are handled by the
   leading/trailing single-row and single-column cases.
   FIX: the odd-height tail loop blended chroma with `u`/`v` (only the last
   of the two horizontally-averaged samples) instead of the accumulated
   `u1`/`v1`, unlike every other 2-pixel loop in this function. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: blend one luma row, chroma weighted as half a block */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: full 2x2 blocks over pairs of rows */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* 4 samples accumulated -> shift 2 averages them */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* was u/v: must use the accumulated pair averages u1/v1 */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
675

    
676
/* Release all heap memory owned by a SubPicture: per-rect bitmap and
   palette planes, the rect structs, the rect array — then clear the
   AVSubtitle so the slot can be reused safely. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++) {
        av_freep(&sp->sub.rects[i]->pict.data[0]); /* bitmap */
        av_freep(&sp->sub.rects[i]->pict.data[1]); /* palette */
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);
    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
691

    
692
/* Draw the current picture (pictq read index) to the SDL screen:
   determine the display aspect ratio, blend any due subtitle into the YUV
   overlay, letterbox the image inside the window, and blit the overlay. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* sample aspect ratio comes from the filter-graph picture ref */
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        /* prefer the container's SAR, fall back to the codec's */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the head subtitle into the overlay if its display time
           has arrived */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: SDL YV12 overlays store V before U, hence the
                       swapped plane indices 1 <-> 2 */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected image inside the window,
           rounding dimensions down to even values */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
813

    
814
/* Mathematical modulo: like a % b but the result is shifted into [0, b)
   for positive b, even when a is negative (C's % keeps the dividend's sign). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return (r >= 0) ? r : r + b;
}
822

    
823
/* Render the audio visualization: oscilloscope waves (show_audio==1) or a
   scrolling RDFT spectrogram (show_audio==2) of the recently played samples.
   The display start index is chosen so the view is centered on the samples
   currently being output, estimated from the hardware buffer fill level and
   the time elapsed since the last audio callback.
   FIX: time_diff was declared int16_t; it holds a difference of av_gettime()
   microsecond values (int64_t), so it overflowed after ~32 ms and the
   subsequent multiply by the sample rate overflowed as well, corrupting the
   computed delay.  It must be int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two with 2^rdft_bits >= 2*height */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;   /* bytes per sample frame (16-bit samples) */
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing to stabilize the waveform display */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: clear, then draw one vertical bar per column */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        /* separator lines between channels */
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrogram mode: windowed RDFT of up to 2 channels, drawn as one
           new column per call */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            /* NOTE(review): with mono input (channels == 1) data[1] is never
               filled below but is still read for `b` — looks like it reads
               uninitialized memory; verify against upstream before relying
               on this path. */
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
960

    
961
/* Create or resize the SDL output window for this stream.
 * The size is picked, in priority order, from: the forced fullscreen size,
 * the user-requested window size, the filter/codec frame size, and finally
 * a 640x480 fallback. Returns 0 on success, -1 if SDL cannot set the mode. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        /* size after filtering may differ from the codec's */
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
1010

    
1011
/* display the current picture, if any */
1012
static void video_display(VideoState *is)
1013
{
1014
    if(!screen)
1015
        video_open(cur_stream);
1016
    if (is->audio_st && is->show_audio)
1017
        video_audio_display(is);
1018
    else if (is->video_st)
1019
        video_image_display(is);
1020
}
1021

    
1022
static int refresh_thread(void *opaque)
1023
{
1024
    VideoState *is= opaque;
1025
    while(!is->abort_request){
1026
    SDL_Event event;
1027
    event.type = FF_REFRESH_EVENT;
1028
    event.user.data1 = opaque;
1029
        if(!is->refresh){
1030
            is->refresh=1;
1031
    SDL_PushEvent(&event);
1032
        }
1033
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1034
    }
1035
    return 0;
1036
}
1037

    
1038
/* get the current audio clock value */
1039
static double get_audio_clock(VideoState *is)
1040
{
1041
    double pts;
1042
    int hw_buf_size, bytes_per_sec;
1043
    pts = is->audio_clock;
1044
    hw_buf_size = audio_write_get_buf_size(is);
1045
    bytes_per_sec = 0;
1046
    if (is->audio_st) {
1047
        bytes_per_sec = is->audio_st->codec->sample_rate *
1048
            2 * is->audio_st->codec->channels;
1049
    }
1050
    if (bytes_per_sec)
1051
        pts -= (double)hw_buf_size / bytes_per_sec;
1052
    return pts;
1053
}
1054

    
1055
/* get the current video clock value */
1056
static double get_video_clock(VideoState *is)
1057
{
1058
    if (is->paused) {
1059
        return is->video_current_pts;
1060
    } else {
1061
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1062
    }
1063
}
1064

    
1065
/* get the current external clock value */
1066
static double get_external_clock(VideoState *is)
1067
{
1068
    int64_t ti;
1069
    ti = av_gettime();
1070
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1071
}
1072

    
1073
/* get the current master clock value */
1074
static double get_master_clock(VideoState *is)
1075
{
1076
    double val;
1077

    
1078
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1079
        if (is->video_st)
1080
            val = get_video_clock(is);
1081
        else
1082
            val = get_audio_clock(is);
1083
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1084
        if (is->audio_st)
1085
            val = get_audio_clock(is);
1086
        else
1087
            val = get_video_clock(is);
1088
    } else {
1089
        val = get_external_clock(is);
1090
    }
1091
    return val;
1092
}
1093

    
1094
/* seek in the stream */
1095
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1096
{
1097
    if (!is->seek_req) {
1098
        is->seek_pos = pos;
1099
        is->seek_rel = rel;
1100
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1101
        if (seek_by_bytes)
1102
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1103
        is->seek_req = 1;
1104
    }
1105
}
1106

    
1107
/* Toggle pause. On resume, the frame timer and the video clock drift are
 * re-based on the current wall clock so that frame scheduling continues
 * from "now" instead of trying to catch up the time spent paused. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: push frame_timer forward by the wall-clock time that
           elapsed while paused */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* demuxer honoured av_read_pause: refresh the displayed pts */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1119

    
1120
/* Compute the wall-clock time at which the picture with the given PTS should
 * be displayed, and advance is->frame_timer by the chosen inter-frame delay.
 * When video is a slave (audio master with an audio stream, or external
 * clock), the delay is corrected against the master clock: set to 0 to catch
 * up, doubled to wait. Returns the updated frame_timer. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* NOTE(review): 'actual_delay' is not defined in this function and 'diff'
       is only set on the sync-correction path, so this trace does not compile
       when DEBUG_SYNC is enabled — verify before turning the define on. */
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
1160

    
1161
/* Called from the main loop on each FF_REFRESH_EVENT to display one frame.
 * Responsibilities: pop the next due picture from the picture queue (dropping
 * late frames when framedrop is enabled), retire expired subtitles, draw the
 * frame (or the audio visualization when there is no video stream), and print
 * the periodic status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the queue
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and wait for the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the picture AFTER this one is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next frame's due
               time, raise the decoder-side skip ratio and discard this one */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switch: drain every queued subpicture */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subtitle once it has expired, or once
                           the following one has become current */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* rate-limit the console status line to ~every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1299

    
1300
/* Allocate (or re-allocate) the YUV overlay for the current write slot of
 * the picture queue. Must run in the main thread (triggered by
 * FF_ALLOC_EVENT) to avoid SDL locking problems; signals pictq_cond so the
 * decoder thread waiting in queue_picture() can proceed. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* dimensions come from the filter graph output, not the raw codec */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* mark the slot ready and wake the decoder thread */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1335

    
1336
/**
 * Append a decoded frame to the picture queue, converting it into the SDL
 * YUV overlay of the next write slot. Blocks while the queue is full and
 * while waiting for the main thread to (re)allocate the overlay.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* the display side keeps up: relax the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 stores V before U, hence planes 1 and 2 are swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        /* the filter graph already outputs YUV420P: plain copy suffices */
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1457

    
1458
/**
 * Compute the exact PTS for the picture if it is omitted in the stream,
 * keep the running video clock up to date (including MPEG-2 repeated
 * fields), then hand the frame to queue_picture().
 *
 * @param pts1 the dts of the pkt / pts of the frame; 0 means "unknown"
 * @return result of queue_picture() (0 on success, -1 on abort)
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* no timestamp: extrapolate from the running clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
1487

    
1488
/* Pull one packet from the video queue and decode it.
 * Handles the special flush packet (flushes codec buffers, empties the
 * picture queue and resets all timing state). Picks *pts from the decoder's
 * reordered timestamps or the packet DTS, whichever the fault-detection
 * counters deem more trustworthy.
 * Returns 1 when a displayable frame was produced, 0 when nothing was
 * produced (no frame, flush, or frame skipped), -1 on queue abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        if(pkt->data == flush_pkt.data){
            /* seek happened: drop codec state and queued pictures */
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            /* reset timestamp fault detection and frame timing */
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            /* count non-monotone dts/pts to decide which is more reliable */
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        /* prefer the reordered pts when forced, when it has proven more
           reliable than dts, or when dts is simply absent */
        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    /* frame skipping: only report every skip_frames-th decoded frame */
    if (got_picture){
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1559

    
1560
#if CONFIG_AVFILTER
/* Private state of the ffplay source filter (input_filter below). */
typedef struct {
    VideoState *is;     /* owning player state */
    AVFrame *frame;     /* scratch frame reused for each decoded picture */
    int use_dr1;        /* nonzero once DR1 buffer callbacks are installed */
} FilterPriv;
1566

    
1567
/* get_buffer callback (DR1): let the decoder write straight into a filter
 * video buffer. Requests a buffer enlarged by the codec's edge requirement,
 * then offsets the data pointers so the visible picture starts past the
 * edge. Returns 0 on success, -1 if no buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* pad dimensions to the codec's alignment plus an edge on every side */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    /* the picture reference advertises the visible size, not the padded one */
    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1 and 2) are subsampled; shift the edge accordingly */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the left and top edge */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;                 /* retrieved again in release/reget */
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1610

    
1611
/* release_buffer callback (DR1): drop the filter picture reference that
 * input_get_buffer() stashed in pic->opaque and clear the data pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_pic(pic->opaque);
}
1616

    
1617
/* reget_buffer callback (DR1): reuse the existing buffer if the picture
 * properties are unchanged, allocate a fresh readable one if the frame has
 * no data yet, and fail if the decoder changed size or pixel format. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterPicRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->w) || (codec->height != ref->h) ||
        (codec->pix_fmt != ref->pic->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
1635

    
1636
/* Initialize the ffplay source filter. 'opaque' must be the player's
 * VideoState. Points the decoder at this filter context and, when the codec
 * supports direct rendering (DR1), installs the buffer callbacks so decoded
 * pictures land directly in filter buffers. Returns 0 on success, -1 on
 * failure (missing opaque or out of memory). */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;    /* lets the buffer callbacks find this filter */
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();
    /* previously unchecked: a NULL frame would be dereferenced on the
       first decode in input_request_frame() */
    if (!priv->frame)
        return -1;

    return 0;
}
1656

    
1657
/* Filter teardown: free the scratch frame allocated in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *state = ctx->priv;

    av_free(state->frame);
}
1662

    
1663
/* request_frame callback: decode until one displayable frame is obtained,
 * wrap it in an AVFilterPicRef (reusing the DR1 buffer when possible,
 * copying otherwise) and push it down the link as a single full slice.
 * Returns 0 on success, -1 on end/abort. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop past flushes and skipped frames until a real picture arrives */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* frame already lives in a filter buffer: just take a reference */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    /* NOTE(review): pkt.pos is read after av_free_packet(); that call frees
       the payload but appears to leave the pos field intact here — confirm
       against the libavformat version in use. */
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1694

    
1695
/* Advertise the decoder's own pixel format as the only format this
 * source filter produces. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;
    enum PixelFormat fmts[] = { fp->is->video_st->codec->pix_fmt, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1705

    
1706
/* Propagate the decoder's frame dimensions onto the output link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *fp = link->src->priv;
    AVCodecContext *dec = fp->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;
    return 0;
}
1716

    
1717
/* Source filter feeding decoded frames from the player into the graph:
 * no inputs, one video output driven by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1735

    
1736
/* end_frame callback of the sink: intentionally empty — the frame is
 * pulled out of the link by get_filtered_video_frame() instead. */
static void output_end_frame(AVFilterLink *link)
{
}
1739

    
1740
/* The sink accepts only YUV420P, the format the SDL YV12 overlay needs. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat accepted[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(accepted));
    return 0;
}
1747

    
1748
/* Pull one filtered picture out of the sink filter's input link into
 * 'frame' (which borrows the picref's planes; the picref itself is handed
 * over via frame->opaque for later unref). Returns 1 on success, -1 when
 * no frame could be produced. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the link's current picture */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
1768

    
1769
/* Sink filter terminating the graph: one video input with a no-op
 * end_frame; frames are fetched explicitly by get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
1782
#endif  /* CONFIG_AVFILTER */
1783

    
1784
/* Video decoding thread. With avfilter enabled it first builds the filter
 * graph (source -> optional user chain from -vf -> sink), then loops:
 * honour pause, fetch one (filtered) frame, convert its pts to seconds and
 * queue it for display; single-steps pause the stream after each frame. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* forward the global swscale flags to any auto-inserted scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* user-specified -vf chain goes between source and sink */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* 0 means "no displayable frame this round" (flush or skip) */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* frame-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
1877

    
1878
/* Subtitle decoding thread: pulls packets from the subtitle queue,
 * decodes them, converts bitmap-subtitle palettes from RGBA to YUVA
 * (the format needed for blending onto the YUV overlay) and enqueues
 * the result into the subpicture ring buffer consumed by the video
 * display code.  Runs until the queue is aborted.  Always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; negative return means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* sentinel packet inserted after a seek: reset decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        /* fill the write slot; it only becomes visible to the reader
           once subpq_size is incremented below */
        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* format == 0 means bitmap (graphical) subtitles */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette in place from RGBA to YUVA */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1952

    
1953
/* copy samples for viewing in editor window */
1954
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1955
{
1956
    int size, len, channels;
1957

    
1958
    channels = is->audio_st->codec->channels;
1959

    
1960
    size = samples_size / sizeof(short);
1961
    while (size > 0) {
1962
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1963
        if (len > size)
1964
            len = size;
1965
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1966
        samples += len;
1967
        is->sample_array_index += len;
1968
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1969
            is->sample_array_index = 0;
1970
        size -= len;
1971
    }
1972
}
1973

    
1974
/* return the new audio buffer size (samples can be added or deleted
1975
   to get better sync if video or external master clock) */
1976
static int synchronize_audio(VideoState *is, short *samples,
1977
                             int samples_size1, double pts)
1978
{
1979
    int n, samples_size;
1980
    double ref_clock;
1981

    
1982
    n = 2 * is->audio_st->codec->channels;
1983
    samples_size = samples_size1;
1984

    
1985
    /* if not master, then we try to remove or add samples to correct the clock */
1986
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1987
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1988
        double diff, avg_diff;
1989
        int wanted_size, min_size, max_size, nb_samples;
1990

    
1991
        ref_clock = get_master_clock(is);
1992
        diff = get_audio_clock(is) - ref_clock;
1993

    
1994
        if (diff < AV_NOSYNC_THRESHOLD) {
1995
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1996
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1997
                /* not enough measures to have a correct estimate */
1998
                is->audio_diff_avg_count++;
1999
            } else {
2000
                /* estimate the A-V difference */
2001
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2002

    
2003
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2004
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2005
                    nb_samples = samples_size / n;
2006

    
2007
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2008
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2009
                    if (wanted_size < min_size)
2010
                        wanted_size = min_size;
2011
                    else if (wanted_size > max_size)
2012
                        wanted_size = max_size;
2013

    
2014
                    /* add or remove samples to correction the synchro */
2015
                    if (wanted_size < samples_size) {
2016
                        /* remove samples */
2017
                        samples_size = wanted_size;
2018
                    } else if (wanted_size > samples_size) {
2019
                        uint8_t *samples_end, *q;
2020
                        int nb;
2021

    
2022
                        /* add samples */
2023
                        nb = (samples_size - wanted_size);
2024
                        samples_end = (uint8_t *)samples + samples_size - n;
2025
                        q = samples_end + n;
2026
                        while (nb > 0) {
2027
                            memcpy(q, samples_end, n);
2028
                            q += n;
2029
                            nb -= n;
2030
                        }
2031
                        samples_size = wanted_size;
2032
                    }
2033
                }
2034
#if 0
2035
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2036
                       diff, avg_diff, samples_size - samples_size1,
2037
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2038
#endif
2039
            }
2040
        } else {
2041
            /* too big difference : may be initial PTS errors, so
2042
               reset A-V filter */
2043
            is->audio_diff_avg_count = 0;
2044
            is->audio_diff_cum = 0;
2045
        }
2046
    }
2047

    
2048
    return samples_size;
2049
}
2050

    
2051
/* decode one audio frame and returns its uncompressed size */
/* On success, is->audio_buf points at the decoded samples (converted
 * to S16 if the decoder output another format) and the byte count is
 * returned.  Returns -1 when paused, the queue is aborted, or getting
 * a packet fails.  Also advances is->audio_clock past the decoded data
 * and stores the frame's presentation time in *pts_ptr. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* walking view into the current packet */
    AVPacket *pkt = &is->audio_pkt;             /* owns the packet data */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes of the packet */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the S16 converter when the decoder's output
               format differs from what we fed to SDL */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert into audio_buf2, or hand out audio_buf1 directly */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;     /* bytes per multi-channel S16 sample */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* seek sentinel: reset decoder state and try again */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2153

    
2154
/* get the current audio output buffer size, in samples. With SDL, we
2155
   cannot have a precise information */
2156
static int audio_write_get_buf_size(VideoState *is)
2157
{
2158
    return is->audio_buf_size - is->audio_buf_index;
2159
}
2160

    
2161

    
2162
/* prepare a new audio buffer */
2163
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2164
{
2165
    VideoState *is = opaque;
2166
    int audio_size, len1;
2167
    double pts;
2168

    
2169
    audio_callback_time = av_gettime();
2170

    
2171
    while (len > 0) {
2172
        if (is->audio_buf_index >= is->audio_buf_size) {
2173
           audio_size = audio_decode_frame(is, &pts);
2174
           if (audio_size < 0) {
2175
                /* if error, just output silence */
2176
               is->audio_buf = is->audio_buf1;
2177
               is->audio_buf_size = 1024;
2178
               memset(is->audio_buf, 0, is->audio_buf_size);
2179
           } else {
2180
               if (is->show_audio)
2181
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2182
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2183
                                              pts);
2184
               is->audio_buf_size = audio_size;
2185
           }
2186
           is->audio_buf_index = 0;
2187
        }
2188
        len1 = is->audio_buf_size - is->audio_buf_index;
2189
        if (len1 > len)
2190
            len1 = len;
2191
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2192
        len -= len1;
2193
        stream += len1;
2194
        is->audio_buf_index += len1;
2195
    }
2196
}
2197

    
2198
/* open a given stream. Return 0 if OK */
/* Finds and opens a decoder for ic->streams[stream_index], applies the
 * global command-line codec options, opens SDL audio output for audio
 * streams, and spawns the per-type worker thread (video/subtitle) or
 * unpauses SDL audio.  Returns -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder to downmix to at most stereo; SDL output is
           opened below with the decoder's channel count */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* propagate command-line decoding options into the codec context */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        /* remember the size SDL actually granted */
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    /* stop discarding this stream's packets in the demuxer */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        /* start the SDL callback pulling audio */
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2297

    
2298
/* Undo stream_component_open() for one stream: abort its packet queue,
 * unblock and join its worker thread (audio is serviced by SDL's
 * callback, so SDL_CloseAudio() takes that role), free queue contents,
 * close the codec and mark the stream closed in VideoState. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback; afterwards audio_buf is no longer read */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* tell the demuxer to drop this stream's packets again */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2369

    
2370
/* since we have only one decoding thread, we can use a global
2371
   variable instead of a thread local variable */
2372
static VideoState *global_video_state;
2373

    
2374
static int decode_interrupt_cb(void)
2375
{
2376
    return (global_video_state && global_video_state->abort_request);
2377
}
2378

    
2379
/* this thread gets the stream from the disk or the network */
/* Demuxing thread: opens the input, picks and opens the wanted
 * audio/video/subtitle streams, then loops reading packets and
 * dispatching them to the per-stream queues, handling pause, seek
 * requests, queue back-pressure, EOF/looping and play-range limits.
 * On exit it tears everything down and, on error, posts FF_QUIT_EVENT. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];              /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};          /* streams seen so far per type */
    int st_best_packet_count[AVMEDIA_TYPE_NB];  /* best codec_info_nb_frames per type */
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* allow blocking I/O below to be interrupted by abort_request */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* forced input parameters from the command line (raw formats etc.) */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default for -bytes: seek by bytes only for formats with timestamp
       discontinuities */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* select, per media type, the user-requested stream or the one with
       the most frames seen during probing */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* no video: fall back to the audio visualization display */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause/resume to the demuxer (needed for network
           streams) */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop all queued packets and insert the flush sentinel
                   so each decoder resets its state */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            /* push a null packet to flush the video decoder's delayed
               frames */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* everything consumed: loop from the start or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2639

    
2640
/* Allocate and initialize a VideoState for the given input and spawn
 * the demuxing thread (decode_thread).  Returns NULL on allocation or
 * thread-creation failure. */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* fix: also release the SDL synchronization primitives created
           above; the original leaked them on this error path */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2667

    
2668
/* Tear down a VideoState: stop the worker threads, release all queued
 * pictures and sync primitives, then free the state itself. */
static void stream_close(VideoState *is)
{
    int idx;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* release every entry of the picture queue */
    for (idx = 0; idx < VIDEO_PICTURE_QUEUE_SIZE; idx++) {
        VideoPicture *pic = &is->pictq[idx];
#if CONFIG_AVFILTER
        if (pic->picref) {
            avfilter_unref_pic(pic->picref);
            pic->picref = NULL;
        }
#endif
        if (pic->bmp) {
            SDL_FreeYUVOverlay(pic->bmp);
            pic->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2701

    
2702
/* Cycle the active stream of the given media type to the next usable one.
 * Scans forward from the current stream index, wrapping around the stream
 * list; for subtitles the cycle also passes through "disabled" (-1).
 * On success, closes the old stream component and opens the new one. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* only subtitles may legitimately start from the disabled index (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrap through the "no subtitle" state before stream 0 */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* came all the way around without finding an alternative */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* audio needs a valid sample rate and channel count */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2750

    
2751

    
2752
static void toggle_full_screen(void)
2753
{
2754
    is_full_screen = !is_full_screen;
2755
    if (!fs_screen_width) {
2756
        /* use default SDL method */
2757
//        SDL_WM_ToggleFullScreen(screen);
2758
    }
2759
    video_open(cur_stream);
2760
}
2761

    
2762
static void toggle_pause(void)
2763
{
2764
    if (cur_stream)
2765
        stream_pause(cur_stream);
2766
    step = 0;
2767
}
2768

    
2769
static void step_to_next_frame(void)
2770
{
2771
    if (cur_stream) {
2772
        /* if the stream is paused unpause it, then step */
2773
        if (cur_stream->paused)
2774
            stream_pause(cur_stream);
2775
    }
2776
    step = 1;
2777
}
2778

    
2779
static void do_exit(void)
2780
{
2781
    int i;
2782
    if (cur_stream) {
2783
        stream_close(cur_stream);
2784
        cur_stream = NULL;
2785
    }
2786
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2787
        av_free(avcodec_opts[i]);
2788
    av_free(avformat_opts);
2789
    av_free(sws_opts);
2790
#if CONFIG_AVFILTER
2791
    avfilter_uninit();
2792
#endif
2793
    if (show_status)
2794
        printf("\n");
2795
    SDL_Quit();
2796
    exit(0);
2797
}
2798

    
2799
static void toggle_audio_display(void)
2800
{
2801
    if (cur_stream) {
2802
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2803
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2804
        fill_rectangle(screen,
2805
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2806
                    bgcolor);
2807
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2808
    }
2809
}
2810

    
2811
/* handle an event sent by the GUI */
2812
/* Main SDL event loop: dispatches keyboard shortcuts, mouse seeking,
 * window resize, refresh/alloc requests and quit events. Runs forever;
 * only do_exit() (via q/ESC/quit events) leaves this function. */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; increment in seconds, converted
             * to bytes below when seeking by byte position */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* pick the best-known current byte position:
                         * video pos, else audio packet pos, else file pos */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* scale seconds to bytes via the bitrate,
                         * falling back to a fixed 180 kB/s guess */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            /* click or drag: x position maps linearly to a seek target */
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    /* no usable duration: seek by fraction of file size */
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    /* duration is relative to start_time; offset the target */
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)create window and overlay */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2949

    
2950
static void opt_frame_size(const char *arg)
2951
{
2952
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2953
        fprintf(stderr, "Incorrect frame size\n");
2954
        exit(1);
2955
    }
2956
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2957
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2958
        exit(1);
2959
    }
2960
}
2961

    
2962
static int opt_width(const char *opt, const char *arg)
2963
{
2964
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2965
    return 0;
2966
}
2967

    
2968
static int opt_height(const char *opt, const char *arg)
2969
{
2970
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2971
    return 0;
2972
}
2973

    
2974
static void opt_format(const char *arg)
2975
{
2976
    file_iformat = av_find_input_format(arg);
2977
    if (!file_iformat) {
2978
        fprintf(stderr, "Unknown input format: %s\n", arg);
2979
        exit(1);
2980
    }
2981
}
2982

    
2983
static void opt_frame_pix_fmt(const char *arg)
2984
{
2985
    frame_pix_fmt = av_get_pix_fmt(arg);
2986
}
2987

    
2988
static int opt_sync(const char *opt, const char *arg)
2989
{
2990
    if (!strcmp(arg, "audio"))
2991
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2992
    else if (!strcmp(arg, "video"))
2993
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2994
    else if (!strcmp(arg, "ext"))
2995
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2996
    else {
2997
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2998
        exit(1);
2999
    }
3000
    return 0;
3001
}
3002

    
3003
static int opt_seek(const char *opt, const char *arg)
3004
{
3005
    start_time = parse_time_or_die(opt, arg, 1);
3006
    return 0;
3007
}
3008

    
3009
static int opt_duration(const char *opt, const char *arg)
3010
{
3011
    duration = parse_time_or_die(opt, arg, 1);
3012
    return 0;
3013
}
3014

    
3015
static int opt_debug(const char *opt, const char *arg)
3016
{
3017
    av_log_set_level(99);
3018
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3019
    return 0;
3020
}
3021

    
3022
static int opt_vismv(const char *opt, const char *arg)
3023
{
3024
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3025
    return 0;
3026
}
3027

    
3028
static int opt_thread_count(const char *opt, const char *arg)
3029
{
3030
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3031
#if !HAVE_THREADS
3032
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3033
#endif
3034
    return 0;
3035
}
3036

    
3037
/* Command-line option table, consumed by parse_options()/show_help_options().
 * Each row: name, flags (type + HAS_ARG/OPT_EXPERT/...), target (variable
 * pointer or handler function), help text, argument name. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3081

    
3082
/* Print the one-line usage summary. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3088

    
3089
/* Print full help: usage line, the option table (main + advanced,
 * split on OPT_EXPERT) and the interactive key bindings. */
static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3110

    
3111
static void opt_input_file(const char *filename)
3112
{
3113
    if (input_filename) {
3114
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3115
                filename, input_filename);
3116
        exit(1);
3117
    }
3118
    if (!strcmp(filename, "-"))
3119
        filename = "pipe:";
3120
    input_filename = filename;
3121
}
3122

    
3123
/* Called from the main */
3124
int main(int argc, char **argv)
3125
{
3126
    int flags, i;
3127

    
3128
    /* register all codecs, demux and protocols */
3129
    avcodec_register_all();
3130
#if CONFIG_AVDEVICE
3131
    avdevice_register_all();
3132
#endif
3133
#if CONFIG_AVFILTER
3134
    avfilter_register_all();
3135
#endif
3136
    av_register_all();
3137

    
3138
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
3139
        avcodec_opts[i]= avcodec_alloc_context2(i);
3140
    }
3141
    avformat_opts = avformat_alloc_context();
3142
#if !CONFIG_AVFILTER
3143
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3144
#endif
3145

    
3146
    show_banner();
3147

    
3148
    parse_options(argc, argv, options, opt_input_file);
3149

    
3150
    if (!input_filename) {
3151
        show_usage();
3152
        fprintf(stderr, "An input file must be specified\n");
3153
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3154
        exit(1);
3155
    }
3156

    
3157
    if (display_disable) {
3158
        video_disable = 1;
3159
    }
3160
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3161
#if !defined(__MINGW32__) && !defined(__APPLE__)
3162
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3163
#endif
3164
    if (SDL_Init (flags)) {
3165
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3166
        exit(1);
3167
    }
3168

    
3169
    if (!display_disable) {
3170
#if HAVE_SDL_VIDEO_SIZE
3171
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3172
        fs_screen_width = vi->current_w;
3173
        fs_screen_height = vi->current_h;
3174
#endif
3175
    }
3176

    
3177
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3178
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3179
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3180

    
3181
    av_init_packet(&flush_pkt);
3182
    flush_pkt.data= "FLUSH";
3183

    
3184
    cur_stream = stream_open(input_filename, file_iformat);
3185

    
3186
    event_loop();
3187

    
3188
    /* never returns */
3189

    
3190
    return 0;
3191
}