/* ffmpeg / ffplay.c @ revision 00f72577 */

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <inttypes.h>
24
#include <math.h>
25
#include <limits.h>
26
#include "libavutil/avstring.h"
27
#include "libavutil/colorspace.h"
28
#include "libavutil/pixdesc.h"
29
#include "libavformat/avformat.h"
30
#include "libavdevice/avdevice.h"
31
#include "libswscale/swscale.h"
32
#include "libavcodec/audioconvert.h"
33
#include "libavcodec/opt.h"
34
#include "libavcodec/avfft.h"
35

    
36
#if CONFIG_AVFILTER
37
# include "libavfilter/avfilter.h"
38
# include "libavfilter/avfiltergraph.h"
39
# include "libavfilter/graphparser.h"
40
#endif
41

    
42
#include "cmdutils.h"
43

    
44
#include <SDL.h>
45
#include <SDL_thread.h>
46

    
47
#ifdef __MINGW32__
48
#undef main /* We don't want SDL to override our main() */
49
#endif
50

    
51
#include <unistd.h>
52
#include <assert.h>
53

    
54
const char program_name[] = "FFplay";
55
const int program_birth_year = 2003;
56

    
57
//#define DEBUG_SYNC
58

    
59
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61
#define MIN_FRAMES 5
62

    
63
/* SDL audio buffer size, in samples. Should be small to have precise
64
   A/V sync as SDL does not have hardware buffer fullness info. */
65
#define SDL_AUDIO_BUFFER_SIZE 1024
66

    
67
/* no AV sync correction is done if below the AV sync threshold */
68
#define AV_SYNC_THRESHOLD 0.01
69
/* no AV correction is done if too big error */
70
#define AV_NOSYNC_THRESHOLD 10.0
71

    
72
#define FRAME_SKIP_FACTOR 0.05
73

    
74
/* maximum audio speed change to get correct sync */
75
#define SAMPLE_CORRECTION_PERCENT_MAX 10
76

    
77
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78
#define AUDIO_DIFF_AVG_NB   20
79

    
80
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
81
#define SAMPLE_ARRAY_SIZE (2*65536)
82

    
83
static int sws_flags = SWS_BICUBIC;
84

    
85
/* Thread-safe FIFO of demuxed packets, shared between the demux thread
   (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;  /* singly linked list head / tail */
    int nb_packets;       /* number of packets currently queued */
    int size;             /* sum of payload bytes plus node overhead */
    int abort_request;    /* when set, blocked readers return immediately */
    SDL_mutex *mutex;     /* protects every field above */
    SDL_cond *cond;       /* signalled on put and on abort */
} PacketQueue;
93

    
94
/* Depth of the decoded-picture and subtitle FIFOs. */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded picture queued for display, backed by an SDL YUV overlay. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* overlay holding the pixels; NULL until allocated */
    int width, height; /* source height & width */
    int allocated;                               /* set once the overlay has been allocated */
    enum PixelFormat pix_fmt;

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      /* filtered-picture reference (owns the pixel data) */
#endif
} VideoPicture;
110

    
111
/* One decoded subtitle together with the pts at which it becomes active. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;
115

    
116
/* Master clock selection: which clock drives A/V synchronisation. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121

    
122
/* Complete player state for one open stream: worker threads, demuxer
   context, per-media packet queues, clocks, and display bookkeeping. */
typedef struct VideoState {
    /* worker threads */
    SDL_Thread *parse_tid;      /* demux thread */
    SDL_Thread *video_tid;      /* video decode thread */
    SDL_Thread *refresh_tid;    /* display refresh-event thread */
    AVInputFormat *iformat;
    int no_background;
    int abort_request;          /* tells all threads to quit */
    int paused;
    int last_paused;
    /* pending seek request, consumed by the demux thread */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;           /* index of the selected audio stream */

    int av_sync_type;           /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio decoding / A-V sync state */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;         /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* audio visualisation (waveform / RDFT spectrogram) */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];  /* circular buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;                   /* current spectrogram column */

    /* subtitle decoding */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* video decoding / display */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;   /* display window geometry */

    /* timestamp plausibility counters used to pick pts vs dts */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    /* frame-dropping state */
    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
219

    
220
static void show_help(void);
221
static int audio_write_get_buf_size(VideoState *is);
222

    
223
/* options specified by the user */
224
static AVInputFormat *file_iformat;
225
static const char *input_filename;
226
static const char *window_title;
227
static int fs_screen_width;
228
static int fs_screen_height;
229
static int screen_width = 0;
230
static int screen_height = 0;
231
static int frame_width = 0;
232
static int frame_height = 0;
233
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234
static int audio_disable;
235
static int video_disable;
236
static int wanted_stream[AVMEDIA_TYPE_NB]={
237
    [AVMEDIA_TYPE_AUDIO]=-1,
238
    [AVMEDIA_TYPE_VIDEO]=-1,
239
    [AVMEDIA_TYPE_SUBTITLE]=-1,
240
};
241
static int seek_by_bytes=-1;
242
static int display_disable;
243
static int show_status = 1;
244
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245
static int64_t start_time = AV_NOPTS_VALUE;
246
static int64_t duration = AV_NOPTS_VALUE;
247
static int debug = 0;
248
static int debug_mv = 0;
249
static int step = 0;
250
static int thread_count = 1;
251
static int workaround_bugs = 1;
252
static int fast = 0;
253
static int genpts = 0;
254
static int lowres = 0;
255
static int idct = FF_IDCT_AUTO;
256
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259
static int error_recognition = FF_ER_CAREFUL;
260
static int error_concealment = 3;
261
static int decoder_reorder_pts= -1;
262
static int autoexit;
263
static int exit_on_keydown;
264
static int exit_on_mousedown;
265
static int loop=1;
266
static int framedrop=1;
267

    
268
static int rdftspeed=20;
269
#if CONFIG_AVFILTER
270
static char *vfilters = NULL;
271
#endif
272

    
273
/* current context */
274
static int is_full_screen;
275
static VideoState *cur_stream;
276
static int64_t audio_callback_time;
277

    
278
static AVPacket flush_pkt;
279

    
280
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
281
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283

    
284
static SDL_Surface *screen;
285

    
286
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
287

    
288
/* packet queue handling */
289
/* Zero a PacketQueue, create its lock/condition pair, and prime it with
   the global flush packet so the consumer resets its decoder state. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
296

    
297
/* Discard every queued packet and reset the counters; the queue itself
   (mutex, condition) stays alive and usable afterwards. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);  /* release the payload reference */
        av_freep(&cur);             /* then the list node itself */
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
313

    
314
/* Tear a queue down for good: drop any remaining packets, then destroy
   the mutex and condition variable created by packet_queue_init().
   The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
320

    
321
/* Append *pkt to the queue and wake one blocked reader.
   The packet is duplicated first so the queued copy owns its data
   (the flush marker carries no payload and is passed through as-is).
   Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *node;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    node = av_malloc(sizeof(*node));
    if (!node)
        return -1;
    node->pkt  = *pkt;
    node->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = node;
    else
        q->first_pkt = node;         /* queue was empty */
    q->last_pkt = node;
    q->nb_packets++;
    q->size += node->pkt.size + sizeof(*node);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
352

    
353
/* Make blocked readers give up: raise the abort flag under the lock and
   wake any thread sleeping in packet_queue_get(). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
363

    
364
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
365
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
366
{
367
    AVPacketList *pkt1;
368
    int ret;
369

    
370
    SDL_LockMutex(q->mutex);
371

    
372
    for(;;) {
373
        if (q->abort_request) {
374
            ret = -1;
375
            break;
376
        }
377

    
378
        pkt1 = q->first_pkt;
379
        if (pkt1) {
380
            q->first_pkt = pkt1->next;
381
            if (!q->first_pkt)
382
                q->last_pkt = NULL;
383
            q->nb_packets--;
384
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
385
            *pkt = pkt1->pkt;
386
            av_free(pkt1);
387
            ret = 1;
388
            break;
389
        } else if (!block) {
390
            ret = 0;
391
            break;
392
        } else {
393
            SDL_CondWait(q->cond, q->mutex);
394
        }
395
    }
396
    SDL_UnlockMutex(q->mutex);
397
    return ret;
398
}
399

    
400
/* Fill an axis-aligned w x h rectangle at (x, y) with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect = { .x = x, .y = y, .w = w, .h = h };
    SDL_FillRect(screen, &rect, color);
}
410

    
411
#if 0
412
/* draw only the border of a rectangle */
413
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
414
{
415
    int w1, w2, h1, h2;
416

417
    /* fill the background */
418
    w1 = x;
419
    if (w1 < 0)
420
        w1 = 0;
421
    w2 = s->width - (x + w);
422
    if (w2 < 0)
423
        w2 = 0;
424
    h1 = y;
425
    if (h1 < 0)
426
        h1 = 0;
427
    h2 = s->height - (y + h);
428
    if (h2 < 0)
429
        h2 = 0;
430
    fill_rectangle(screen,
431
                   s->xleft, s->ytop,
432
                   w1, s->height,
433
                   color);
434
    fill_rectangle(screen,
435
                   s->xleft + s->width - w2, s->ytop,
436
                   w2, s->height,
437
                   color);
438
    fill_rectangle(screen,
439
                   s->xleft + w1, s->ytop,
440
                   s->width - w1 - w2, h1,
441
                   color);
442
    fill_rectangle(screen,
443
                   s->xleft + w1, s->ytop + s->height - h2,
444
                   s->width - w1 - w2, h2,
445
                   color);
446
}
447
#endif
448

    
449
/* Blend newp over oldp with alpha a (0..255); s is an extra fixed-point
   shift used when several chroma samples were summed beforehand. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word read from s. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it as
   AYUV (the palette has been converted to YCbCr beforehand). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack a, y, u, v back into the 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel (palettized subtitles: one index byte) */
#define BPP 1
477

    
478
/* Alpha-blend one palettized subtitle rectangle onto a YUV420P picture.
 * dst         destination picture (YUV420P layout assumed: chroma is
 *             subsampled 2x2, hence the averaging of 2 or 4 samples
 *             per chroma pixel below)
 * rect        subtitle rectangle: 8-bit palette indices in pict.data[0],
 *             AYUV palette in pict.data[1]
 * imgw/imgh   destination dimensions, used to clip the rectangle.
 *
 * The three sections handle: a leading odd row (dsty & 1), the main
 * two-rows-at-a-time body, and a trailing odd row.  Within each, odd
 * leading/trailing columns are handled separately so chroma averages
 * are taken over the correct number of luma samples.
 *
 * FIX: the trailing odd-height row previously blended chroma with the
 * last sample's u/v instead of the accumulated u1/v1, giving wrong
 * chroma on the bottom row of odd-height rectangles. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle against the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma shared with the row above, blend at 1/4 weight */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: two rows per iteration, chroma averaged over 4 samples */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* use the accumulated pair averages (was u/v: only the
               second sample, producing wrong chroma on this row) */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
677

    
678
/* Release every pixel buffer and rectangle descriptor attached to a
   subtitle picture, then reset it to an empty AVSubtitle. */
static void free_subpicture(SubPicture *sp)
{
    int n;

    for (n = 0; n < sp->sub.num_rects; n++) {
        av_freep(&sp->sub.rects[n]->pict.data[0]);  /* palette indices */
        av_freep(&sp->sub.rects[n]->pict.data[1]);  /* palette */
        av_freep(&sp->sub.rects[n]);                /* the rect itself */
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}
693

    
694
/* Draw the picture at the read index of the picture queue: blend any
   active subtitle into the overlay, compute a letterboxed destination
   rectangle from the aspect ratio, and hand the overlay to SDL. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* sample (pixel) aspect ratio; 0 means unknown */
#if CONFIG_AVFILTER
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio into display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its start time has passed)
           directly into the overlay's pixel planes */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* planes 1 and 2 are swapped: the overlay stores
                       Y,V,U while AVPicture expects Y,U,V (YV12 layout
                       — NOTE(review): verify against overlay format) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* fit the frame in the window, preserving aspect; & ~1 keeps
           the dimensions even */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
815

    
816
/* Mathematical (always non-negative) remainder of a modulo b, for b > 0.
   C's % operator may return a negative value for negative a; compensate. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
824

    
825
/* Draw the audio visualisation: an oscilloscope trace of the raw
 * samples when s->show_audio == 1, otherwise a scrolling RDFT
 * spectrogram.  Reads the circular sample_array filled by the audio
 * callback; centers the display on the samples currently being output.
 *
 * FIX: time_diff was declared int16_t, but it holds a microsecond
 * delta from av_gettime() (int64_t); it overflowed after ~32 ms and
 * corrupted the delay computation.  It is now int64_t. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;   /* microseconds since the last audio callback */
    int rdft_bits, nb_freq;

    /* smallest power of two covering twice the window height */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing to stabilize the waveform */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: one horizontal strip per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between the channel strips */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrogram mode: one new column per call */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window height changed: rebuild the RDFT context */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);  /* parabolic window */
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
963

    
964
/* (Re)create the SDL output surface.
 * Window size preference order: forced fullscreen size, forced --x/--y size,
 * filter-graph output size (or decoder size without libavfilter), 640x480
 * fallback. Returns 0 on success (including when the existing surface already
 * matches), -1 if SDL_SetVideoMode() fails. Updates is->width/height and the
 * global 'screen'. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do when the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size SDL actually gave us (may differ from requested) */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
/* display the current picture, if any */
/* Dispatch to the audio visualization or the video image renderer,
 * opening the SDL window on first use.
 * NOTE(review): video_open() is called with the global cur_stream rather
 * than the 'is' argument — presumably always the same object here; confirm
 * before relying on it with multiple streams. */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}
static int refresh_thread(void *opaque)
1026
{
1027
    VideoState *is= opaque;
1028
    while(!is->abort_request){
1029
    SDL_Event event;
1030
    event.type = FF_REFRESH_EVENT;
1031
    event.user.data1 = opaque;
1032
        if(!is->refresh){
1033
            is->refresh=1;
1034
    SDL_PushEvent(&event);
1035
        }
1036
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1037
    }
1038
    return 0;
1039
}
1040

    
1041
/* get the current audio clock value */
/* Returns the presentation time (seconds) of the sample currently being
 * played, i.e. the decoded clock minus the latency of the data still
 * sitting in the hardware buffer.
 * NOTE(review): bytes_per_sec hard-codes 2 bytes per sample — assumes
 * 16-bit output; matches the rest of this file's S16 audio path. */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
/* get the current video clock value */
1059
static double get_video_clock(VideoState *is)
1060
{
1061
    if (is->paused) {
1062
        return is->video_current_pts;
1063
    } else {
1064
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1065
    }
1066
}
1067

    
1068
/* get the current external clock value */
1069
static double get_external_clock(VideoState *is)
1070
{
1071
    int64_t ti;
1072
    ti = av_gettime();
1073
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1074
}
1075

    
1076
/* get the current master clock value */
1077
static double get_master_clock(VideoState *is)
1078
{
1079
    double val;
1080

    
1081
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1082
        if (is->video_st)
1083
            val = get_video_clock(is);
1084
        else
1085
            val = get_audio_clock(is);
1086
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1087
        if (is->audio_st)
1088
            val = get_audio_clock(is);
1089
        else
1090
            val = get_video_clock(is);
1091
    } else {
1092
        val = get_external_clock(is);
1093
    }
1094
    return val;
1095
}
1096

    
1097
/* seek in the stream */
1098
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1099
{
1100
    if (!is->seek_req) {
1101
        is->seek_pos = pos;
1102
        is->seek_rel = rel;
1103
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1104
        if (seek_by_bytes)
1105
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1106
        is->seek_req = 1;
1107
    }
1108
}
1109

    
1110
/* pause or resume the video */
/* Toggle is->paused. On resume, advance frame_timer by the time spent
 * paused (derived from how far the frozen pts lags the drift-reconstructed
 * clock) and re-anchor video_current_pts/_drift to the current wall clock,
 * so the video clock continues seamlessly. The read_pause_return check
 * skips the pts re-anchor when the demuxer does not support av_read_pause(). */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() stays continuous */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
/* Compute the absolute display time (seconds, wall-clock based) for the
 * frame with pts 'frame_current_pts', advancing is->frame_timer by the
 * inter-frame delay. When audio or the external clock is master, the delay
 * is stretched (frame repeated) or zeroed (frame dropped) to chase the
 * master clock.
 * Returns the updated is->frame_timer. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    /* diff must be initialized: the sync branch below is skipped when video
       itself is the master, and diff is read in the DEBUG_SYNC trace. */
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fixed: the old trace referenced 'actual_delay', which is not defined
       in this function (compile error with DEBUG_SYNC enabled) */
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
/* called to display each frame */
/* Handler for FF_REFRESH_EVENT. Pops the next due picture from the picture
 * queue (honoring its target_clock), optionally drops late frames when
 * 'framedrop' is enabled, retires expired subtitles, renders via
 * video_display(), and prints the periodic status line. Audio-only streams
 * just redraw the visualization. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due for display */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next frame's due
               time, skip this one and raise the decoder skip ratio */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* stream switched: flush the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop a subtitle once its display window has ended
                           or the next one's window has started */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* throttle the status line to once per 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
/* Handler for FF_ALLOC_EVENT: (re)creates the YUV overlay for the write
 * slot of the picture queue, then signals pictq_cond so the video thread
 * blocked in queue_picture() can proceed. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* picture geometry comes from the filter graph output... */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* ...or straight from the decoder when filtering is disabled */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake the video thread waiting for vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
/**
 * Copy a decoded frame into the next free slot of the picture queue,
 * converting to YV12 for the SDL overlay, and stamp it with its display
 * target time. Blocks while the queue is full; overlay (re)allocation is
 * delegated to the main thread via FF_ALLOC_EVENT.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while display is idle: decay the decoder skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores V before U, hence planes 1 and 2 are swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 * @return result of queue_picture(): 0 on success, -1 on abort
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        /* missing pts: extrapolate from the running video clock */
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
#endif
    return queue_picture(is, src_frame, pts, pos);
}
/* Pull one packet from the video queue and decode it.
 * Returns 1 when a picture that should be displayed was produced, 0 when no
 * picture is available yet (or the frame is skipped / a flush packet was
 * handled), -1 on abort. *pts receives the chosen timestamp in stream
 * time-base units, selected from reordered pts vs dts using the running
 * faulty_pts/faulty_dts counters. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            return -1;

        /* flush packet: reset decoder and all timing state after a seek */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                is->pictq[i].target_clock= 0;
            }
            /* wait for the display side to drain the picture queue */
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;
            is->skip_frames= 1;
            is->skip_frames_index= 0;
            return 0;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        /* count non-monotonic dts/pts occurrences to decide which
           timestamp source to trust below */
        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            *pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            *pts= pkt->dts;
        else
            *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* honor the frame-skip ratio computed by the display side */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
#if CONFIG_AVFILTER
1564
/* Private state of the ffplay source filter ("ffplay_input"). */
typedef struct {
    VideoState *is;     // player state the filter pulls frames from
    AVFrame *frame;     // reusable decode frame
    int use_dr1;        // non-zero when direct rendering buffers are in use
} FilterPriv;
/* get_buffer() callback: let the decoder render directly into a buffer
 * obtained from the filter graph (direct rendering). Translates the
 * decoder's buffer hints into filter permissions, pads the dimensions by
 * the codec edge width, and offsets the plane pointers past the edge.
 * Returns 0 on success, -1 if no filter buffer could be obtained. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* request a buffer large enough for the aligned size plus edges */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 4; i ++) {
        /* chroma planes (1,2) are subsampled; shift the edge accordingly */
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            /* skip the top/left edge so the decoder writes inside it */
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
/* release_buffer() callback: drop the filter picture reference backing
 * this frame and clear the plane pointers. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_pic(pic->opaque);
}
/* reget_buffer() callback: reuse the existing filter buffer if the frame
 * still has one and the picture properties are unchanged; otherwise fall
 * back to a fresh get_buffer(). Returns 0 on success, -1 on property
 * mismatch. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterPicRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->w) || (codec->height != ref->h) ||
        (codec->pix_fmt != ref->pic->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
/* init callback of the source filter. 'opaque' must be the VideoState;
 * installs the direct-rendering buffer callbacks on the video decoder when
 * it supports DR1, and allocates the reusable decode frame.
 * Returns 0 on success, -1 when no opaque pointer was supplied. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
/* uninit callback of the source filter: free the decode frame allocated
 * in input_init(). */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}
/* request_frame callback of the source filter: decode until a displayable
 * picture is produced, wrap it in an AVFilterPicRef (reusing the DR1 buffer
 * when possible), and push it down the link.
 * Returns 0 on success, -1 when decoding was aborted.
 * NOTE(review): pkt.pos is read after av_free_packet(&pkt) — presumably
 * safe because av_free_packet only releases the data buffer, but confirm
 * against the libavformat version in use. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a picture (1) or fails (<0) */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* decoder wrote straight into a filter buffer: just re-reference it */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
/* query_formats callback of the source filter: the only supported format
 * is whatever the video decoder outputs. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
/* config_props callback of the source filter's output link: advertise the
 * decoder's picture dimensions on the link. Always returns 0. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *dec = priv->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;
    return 0;
}
/* Source filter feeding decoded pictures from the player into the graph.
 * It has no inputs and a single video output whose frames are produced on
 * demand by input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
/* end_frame callback of the sink filter: intentionally a no-op — the frame
 * is collected explicitly by get_filtered_video_frame(). */
static void output_end_frame(AVFilterLink *link)
{
}
/* query_formats callback of the sink filter: accept only YUV420P, the
 * format the SDL overlay path expects. */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}
/* Pull one picture out of the filter graph through the sink filter 'ctx'.
 * On success fills 'frame' with the picture's plane pointers (ownership of
 * the AVFilterPicRef moves to frame->opaque), stores its pts/pos, and
 * returns 1; returns -1 when no frame could be obtained. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the picture reference */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
/* Sink filter at the end of the graph; frames are retrieved from its input
 * link by get_filtered_video_frame(). */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
#endif  /* CONFIG_AVFILTER */
1786

    
1787
/* Video decoding thread: builds the filter graph (source -> optional user
 * -vf chain -> sink) when libavfilter is enabled, then loops pulling
 * decoded/filtered frames and handing them to output_picture2() for
 * queuing. Exits on decode abort or queue abort. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    /* propagate the global swscale flags to auto-inserted scalers */
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* parse the user-specified -vf chain between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* 0 means "no displayable picture yet" — keep decoding */
        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
static int subtitle_thread(void *arg)
1882
{
1883
    VideoState *is = arg;
1884
    SubPicture *sp;
1885
    AVPacket pkt1, *pkt = &pkt1;
1886
    int len1, got_subtitle;
1887
    double pts;
1888
    int i, j;
1889
    int r, g, b, y, u, v, a;
1890

    
1891
    for(;;) {
1892
        while (is->paused && !is->subtitleq.abort_request) {
1893
            SDL_Delay(10);
1894
        }
1895
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1896
            break;
1897

    
1898
        if(pkt->data == flush_pkt.data){
1899
            avcodec_flush_buffers(is->subtitle_st->codec);
1900
            continue;
1901
        }
1902
        SDL_LockMutex(is->subpq_mutex);
1903
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1904
               !is->subtitleq.abort_request) {
1905
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1906
        }
1907
        SDL_UnlockMutex(is->subpq_mutex);
1908

    
1909
        if (is->subtitleq.abort_request)
1910
            goto the_end;
1911

    
1912
        sp = &is->subpq[is->subpq_windex];
1913

    
1914
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1915
           this packet, if any */
1916
        pts = 0;
1917
        if (pkt->pts != AV_NOPTS_VALUE)
1918
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1919

    
1920
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1921
                                    &sp->sub, &got_subtitle,
1922
                                    pkt);
1923
//            if (len1 < 0)
1924
//                break;
1925
        if (got_subtitle && sp->sub.format == 0) {
1926
            sp->pts = pts;
1927

    
1928
            for (i = 0; i < sp->sub.num_rects; i++)
1929
            {
1930
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1931
                {
1932
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1933
                    y = RGB_TO_Y_CCIR(r, g, b);
1934
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1935
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1936
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1937
                }
1938
            }
1939

    
1940
            /* now we can update the picture count */
1941
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1942
                is->subpq_windex = 0;
1943
            SDL_LockMutex(is->subpq_mutex);
1944
            is->subpq_size++;
1945
            SDL_UnlockMutex(is->subpq_mutex);
1946
        }
1947
        av_free_packet(pkt);
1948
//        if (step)
1949
//            if (cur_stream)
1950
//                stream_pause(cur_stream);
1951
    }
1952
 the_end:
1953
    return 0;
1954
}
1955

    
1956
/* copy samples for viewing in editor window */
1957
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1958
{
1959
    int size, len, channels;
1960

    
1961
    channels = is->audio_st->codec->channels;
1962

    
1963
    size = samples_size / sizeof(short);
1964
    while (size > 0) {
1965
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1966
        if (len > size)
1967
            len = size;
1968
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1969
        samples += len;
1970
        is->sample_array_index += len;
1971
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1972
            is->sample_array_index = 0;
1973
        size -= len;
1974
    }
1975
}
1976

    
1977
/* return the new audio buffer size (samples can be added or deleted
1978
   to get better sync if video or external master clock) */
1979
static int synchronize_audio(VideoState *is, short *samples,
1980
                             int samples_size1, double pts)
1981
{
1982
    int n, samples_size;
1983
    double ref_clock;
1984

    
1985
    n = 2 * is->audio_st->codec->channels;
1986
    samples_size = samples_size1;
1987

    
1988
    /* if not master, then we try to remove or add samples to correct the clock */
1989
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1990
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1991
        double diff, avg_diff;
1992
        int wanted_size, min_size, max_size, nb_samples;
1993

    
1994
        ref_clock = get_master_clock(is);
1995
        diff = get_audio_clock(is) - ref_clock;
1996

    
1997
        if (diff < AV_NOSYNC_THRESHOLD) {
1998
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1999
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2000
                /* not enough measures to have a correct estimate */
2001
                is->audio_diff_avg_count++;
2002
            } else {
2003
                /* estimate the A-V difference */
2004
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2005

    
2006
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2007
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2008
                    nb_samples = samples_size / n;
2009

    
2010
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2011
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2012
                    if (wanted_size < min_size)
2013
                        wanted_size = min_size;
2014
                    else if (wanted_size > max_size)
2015
                        wanted_size = max_size;
2016

    
2017
                    /* add or remove samples to correction the synchro */
2018
                    if (wanted_size < samples_size) {
2019
                        /* remove samples */
2020
                        samples_size = wanted_size;
2021
                    } else if (wanted_size > samples_size) {
2022
                        uint8_t *samples_end, *q;
2023
                        int nb;
2024

    
2025
                        /* add samples */
2026
                        nb = (samples_size - wanted_size);
2027
                        samples_end = (uint8_t *)samples + samples_size - n;
2028
                        q = samples_end + n;
2029
                        while (nb > 0) {
2030
                            memcpy(q, samples_end, n);
2031
                            q += n;
2032
                            nb -= n;
2033
                        }
2034
                        samples_size = wanted_size;
2035
                    }
2036
                }
2037
#if 0
2038
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2039
                       diff, avg_diff, samples_size - samples_size1,
2040
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2041
#endif
2042
            }
2043
        } else {
2044
            /* too big difference : may be initial PTS errors, so
2045
               reset A-V filter */
2046
            is->audio_diff_avg_count = 0;
2047
            is->audio_diff_cum = 0;
2048
        }
2049
    }
2050

    
2051
    return samples_size;
2052
}
2053

    
2054
/* decode one audio frame and returns its uncompressed size */
2055
static int audio_decode_frame(VideoState *is, double *pts_ptr)
2056
{
2057
    AVPacket *pkt_temp = &is->audio_pkt_temp;
2058
    AVPacket *pkt = &is->audio_pkt;
2059
    AVCodecContext *dec= is->audio_st->codec;
2060
    int n, len1, data_size;
2061
    double pts;
2062

    
2063
    for(;;) {
2064
        /* NOTE: the audio packet can contain several frames */
2065
        while (pkt_temp->size > 0) {
2066
            data_size = sizeof(is->audio_buf1);
2067
            len1 = avcodec_decode_audio3(dec,
2068
                                        (int16_t *)is->audio_buf1, &data_size,
2069
                                        pkt_temp);
2070
            if (len1 < 0) {
2071
                /* if error, we skip the frame */
2072
                pkt_temp->size = 0;
2073
                break;
2074
            }
2075

    
2076
            pkt_temp->data += len1;
2077
            pkt_temp->size -= len1;
2078
            if (data_size <= 0)
2079
                continue;
2080

    
2081
            if (dec->sample_fmt != is->audio_src_fmt) {
2082
                if (is->reformat_ctx)
2083
                    av_audio_convert_free(is->reformat_ctx);
2084
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
2085
                                                         dec->sample_fmt, 1, NULL, 0);
2086
                if (!is->reformat_ctx) {
2087
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2088
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
2089
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
2090
                        break;
2091
                }
2092
                is->audio_src_fmt= dec->sample_fmt;
2093
            }
2094

    
2095
            if (is->reformat_ctx) {
2096
                const void *ibuf[6]= {is->audio_buf1};
2097
                void *obuf[6]= {is->audio_buf2};
2098
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
2099
                int ostride[6]= {2};
2100
                int len= data_size/istride[0];
2101
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2102
                    printf("av_audio_convert() failed\n");
2103
                    break;
2104
                }
2105
                is->audio_buf= is->audio_buf2;
2106
                /* FIXME: existing code assume that data_size equals framesize*channels*2
2107
                          remove this legacy cruft */
2108
                data_size= len*2;
2109
            }else{
2110
                is->audio_buf= is->audio_buf1;
2111
            }
2112

    
2113
            /* if no pts, then compute it */
2114
            pts = is->audio_clock;
2115
            *pts_ptr = pts;
2116
            n = 2 * dec->channels;
2117
            is->audio_clock += (double)data_size /
2118
                (double)(n * dec->sample_rate);
2119
#if defined(DEBUG_SYNC)
2120
            {
2121
                static double last_clock;
2122
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2123
                       is->audio_clock - last_clock,
2124
                       is->audio_clock, pts);
2125
                last_clock = is->audio_clock;
2126
            }
2127
#endif
2128
            return data_size;
2129
        }
2130

    
2131
        /* free the current packet */
2132
        if (pkt->data)
2133
            av_free_packet(pkt);
2134

    
2135
        if (is->paused || is->audioq.abort_request) {
2136
            return -1;
2137
        }
2138

    
2139
        /* read next packet */
2140
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2141
            return -1;
2142
        if(pkt->data == flush_pkt.data){
2143
            avcodec_flush_buffers(dec);
2144
            continue;
2145
        }
2146

    
2147
        pkt_temp->data = pkt->data;
2148
        pkt_temp->size = pkt->size;
2149

    
2150
        /* if update the audio clock with the pts */
2151
        if (pkt->pts != AV_NOPTS_VALUE) {
2152
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2153
        }
2154
    }
2155
}
2156

    
2157
/* get the current audio output buffer size, in samples. With SDL, we
2158
   cannot have a precise information */
2159
static int audio_write_get_buf_size(VideoState *is)
2160
{
2161
    return is->audio_buf_size - is->audio_buf_index;
2162
}
2163

    
2164

    
2165
/* prepare a new audio buffer */
2166
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2167
{
2168
    VideoState *is = opaque;
2169
    int audio_size, len1;
2170
    double pts;
2171

    
2172
    audio_callback_time = av_gettime();
2173

    
2174
    while (len > 0) {
2175
        if (is->audio_buf_index >= is->audio_buf_size) {
2176
           audio_size = audio_decode_frame(is, &pts);
2177
           if (audio_size < 0) {
2178
                /* if error, just output silence */
2179
               is->audio_buf = is->audio_buf1;
2180
               is->audio_buf_size = 1024;
2181
               memset(is->audio_buf, 0, is->audio_buf_size);
2182
           } else {
2183
               if (is->show_audio)
2184
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2185
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2186
                                              pts);
2187
               is->audio_buf_size = audio_size;
2188
           }
2189
           is->audio_buf_index = 0;
2190
        }
2191
        len1 = is->audio_buf_size - is->audio_buf_index;
2192
        if (len1 > len)
2193
            len1 = len;
2194
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2195
        len -= len1;
2196
        stream += len1;
2197
        is->audio_buf_index += len1;
2198
    }
2199
}
2200

    
2201
/* open a given stream. Return 0 if OK */
2202
static int stream_component_open(VideoState *is, int stream_index)
2203
{
2204
    AVFormatContext *ic = is->ic;
2205
    AVCodecContext *avctx;
2206
    AVCodec *codec;
2207
    SDL_AudioSpec wanted_spec, spec;
2208

    
2209
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2210
        return -1;
2211
    avctx = ic->streams[stream_index]->codec;
2212

    
2213
    /* prepare audio output */
2214
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2215
        if (avctx->channels > 0) {
2216
            avctx->request_channels = FFMIN(2, avctx->channels);
2217
        } else {
2218
            avctx->request_channels = 2;
2219
        }
2220
    }
2221

    
2222
    codec = avcodec_find_decoder(avctx->codec_id);
2223
    avctx->debug_mv = debug_mv;
2224
    avctx->debug = debug;
2225
    avctx->workaround_bugs = workaround_bugs;
2226
    avctx->lowres = lowres;
2227
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2228
    avctx->idct_algo= idct;
2229
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2230
    avctx->skip_frame= skip_frame;
2231
    avctx->skip_idct= skip_idct;
2232
    avctx->skip_loop_filter= skip_loop_filter;
2233
    avctx->error_recognition= error_recognition;
2234
    avctx->error_concealment= error_concealment;
2235
    avcodec_thread_init(avctx, thread_count);
2236

    
2237
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2238

    
2239
    if (!codec ||
2240
        avcodec_open(avctx, codec) < 0)
2241
        return -1;
2242

    
2243
    /* prepare audio output */
2244
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2245
        wanted_spec.freq = avctx->sample_rate;
2246
        wanted_spec.format = AUDIO_S16SYS;
2247
        wanted_spec.channels = avctx->channels;
2248
        wanted_spec.silence = 0;
2249
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2250
        wanted_spec.callback = sdl_audio_callback;
2251
        wanted_spec.userdata = is;
2252
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2253
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2254
            return -1;
2255
        }
2256
        is->audio_hw_buf_size = spec.size;
2257
        is->audio_src_fmt= SAMPLE_FMT_S16;
2258
    }
2259

    
2260
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2261
    switch(avctx->codec_type) {
2262
    case AVMEDIA_TYPE_AUDIO:
2263
        is->audio_stream = stream_index;
2264
        is->audio_st = ic->streams[stream_index];
2265
        is->audio_buf_size = 0;
2266
        is->audio_buf_index = 0;
2267

    
2268
        /* init averaging filter */
2269
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2270
        is->audio_diff_avg_count = 0;
2271
        /* since we do not have a precise anough audio fifo fullness,
2272
           we correct audio sync only if larger than this threshold */
2273
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2274

    
2275
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2276
        packet_queue_init(&is->audioq);
2277
        SDL_PauseAudio(0);
2278
        break;
2279
    case AVMEDIA_TYPE_VIDEO:
2280
        is->video_stream = stream_index;
2281
        is->video_st = ic->streams[stream_index];
2282

    
2283
//        is->video_current_pts_time = av_gettime();
2284

    
2285
        packet_queue_init(&is->videoq);
2286
        is->video_tid = SDL_CreateThread(video_thread, is);
2287
        break;
2288
    case AVMEDIA_TYPE_SUBTITLE:
2289
        is->subtitle_stream = stream_index;
2290
        is->subtitle_st = ic->streams[stream_index];
2291
        packet_queue_init(&is->subtitleq);
2292

    
2293
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2294
        break;
2295
    default:
2296
        break;
2297
    }
2298
    return 0;
2299
}
2300

    
2301
static void stream_component_close(VideoState *is, int stream_index)
2302
{
2303
    AVFormatContext *ic = is->ic;
2304
    AVCodecContext *avctx;
2305

    
2306
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2307
        return;
2308
    avctx = ic->streams[stream_index]->codec;
2309

    
2310
    switch(avctx->codec_type) {
2311
    case AVMEDIA_TYPE_AUDIO:
2312
        packet_queue_abort(&is->audioq);
2313

    
2314
        SDL_CloseAudio();
2315

    
2316
        packet_queue_end(&is->audioq);
2317
        if (is->reformat_ctx)
2318
            av_audio_convert_free(is->reformat_ctx);
2319
        is->reformat_ctx = NULL;
2320
        break;
2321
    case AVMEDIA_TYPE_VIDEO:
2322
        packet_queue_abort(&is->videoq);
2323

    
2324
        /* note: we also signal this mutex to make sure we deblock the
2325
           video thread in all cases */
2326
        SDL_LockMutex(is->pictq_mutex);
2327
        SDL_CondSignal(is->pictq_cond);
2328
        SDL_UnlockMutex(is->pictq_mutex);
2329

    
2330
        SDL_WaitThread(is->video_tid, NULL);
2331

    
2332
        packet_queue_end(&is->videoq);
2333
        break;
2334
    case AVMEDIA_TYPE_SUBTITLE:
2335
        packet_queue_abort(&is->subtitleq);
2336

    
2337
        /* note: we also signal this mutex to make sure we deblock the
2338
           video thread in all cases */
2339
        SDL_LockMutex(is->subpq_mutex);
2340
        is->subtitle_stream_changed = 1;
2341

    
2342
        SDL_CondSignal(is->subpq_cond);
2343
        SDL_UnlockMutex(is->subpq_mutex);
2344

    
2345
        SDL_WaitThread(is->subtitle_tid, NULL);
2346

    
2347
        packet_queue_end(&is->subtitleq);
2348
        break;
2349
    default:
2350
        break;
2351
    }
2352

    
2353
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2354
    avcodec_close(avctx);
2355
    switch(avctx->codec_type) {
2356
    case AVMEDIA_TYPE_AUDIO:
2357
        is->audio_st = NULL;
2358
        is->audio_stream = -1;
2359
        break;
2360
    case AVMEDIA_TYPE_VIDEO:
2361
        is->video_st = NULL;
2362
        is->video_stream = -1;
2363
        break;
2364
    case AVMEDIA_TYPE_SUBTITLE:
2365
        is->subtitle_st = NULL;
2366
        is->subtitle_stream = -1;
2367
        break;
2368
    default:
2369
        break;
2370
    }
2371
}
2372

    
2373
/* since we have only one decoding thread, we can use a global
2374
   variable instead of a thread local variable */
2375
static VideoState *global_video_state;
2376

    
2377
static int decode_interrupt_cb(void)
2378
{
2379
    return (global_video_state && global_video_state->abort_request);
2380
}
2381

    
2382
/* this thread gets the stream from the disk or the network */
2383
static int decode_thread(void *arg)
2384
{
2385
    VideoState *is = arg;
2386
    AVFormatContext *ic;
2387
    int err, i, ret;
2388
    int st_index[AVMEDIA_TYPE_NB];
2389
    int st_count[AVMEDIA_TYPE_NB]={0};
2390
    int st_best_packet_count[AVMEDIA_TYPE_NB];
2391
    AVPacket pkt1, *pkt = &pkt1;
2392
    AVFormatParameters params, *ap = &params;
2393
    int eof=0;
2394
    int pkt_in_play_range = 0;
2395

    
2396
    ic = avformat_alloc_context();
2397

    
2398
    memset(st_index, -1, sizeof(st_index));
2399
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2400
    is->video_stream = -1;
2401
    is->audio_stream = -1;
2402
    is->subtitle_stream = -1;
2403

    
2404
    global_video_state = is;
2405
    url_set_interrupt_cb(decode_interrupt_cb);
2406

    
2407
    memset(ap, 0, sizeof(*ap));
2408

    
2409
    ap->prealloced_context = 1;
2410
    ap->width = frame_width;
2411
    ap->height= frame_height;
2412
    ap->time_base= (AVRational){1, 25};
2413
    ap->pix_fmt = frame_pix_fmt;
2414

    
2415
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2416

    
2417
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2418
    if (err < 0) {
2419
        print_error(is->filename, err);
2420
        ret = -1;
2421
        goto fail;
2422
    }
2423
    is->ic = ic;
2424

    
2425
    if(genpts)
2426
        ic->flags |= AVFMT_FLAG_GENPTS;
2427

    
2428
    err = av_find_stream_info(ic);
2429
    if (err < 0) {
2430
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2431
        ret = -1;
2432
        goto fail;
2433
    }
2434
    if(ic->pb)
2435
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2436

    
2437
    if(seek_by_bytes<0)
2438
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2439

    
2440
    /* if seeking requested, we execute it */
2441
    if (start_time != AV_NOPTS_VALUE) {
2442
        int64_t timestamp;
2443

    
2444
        timestamp = start_time;
2445
        /* add the stream start time */
2446
        if (ic->start_time != AV_NOPTS_VALUE)
2447
            timestamp += ic->start_time;
2448
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2449
        if (ret < 0) {
2450
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2451
                    is->filename, (double)timestamp / AV_TIME_BASE);
2452
        }
2453
    }
2454

    
2455
    for(i = 0; i < ic->nb_streams; i++) {
2456
        AVStream *st= ic->streams[i];
2457
        AVCodecContext *avctx = st->codec;
2458
        ic->streams[i]->discard = AVDISCARD_ALL;
2459
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
2460
            continue;
2461
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2462
            continue;
2463

    
2464
        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2465
            continue;
2466
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2467

    
2468
        switch(avctx->codec_type) {
2469
        case AVMEDIA_TYPE_AUDIO:
2470
            if (!audio_disable)
2471
                st_index[AVMEDIA_TYPE_AUDIO] = i;
2472
            break;
2473
        case AVMEDIA_TYPE_VIDEO:
2474
        case AVMEDIA_TYPE_SUBTITLE:
2475
            if (!video_disable)
2476
                st_index[avctx->codec_type] = i;
2477
            break;
2478
        default:
2479
            break;
2480
        }
2481
    }
2482
    if (show_status) {
2483
        dump_format(ic, 0, is->filename, 0);
2484
    }
2485

    
2486
    /* open the streams */
2487
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2488
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2489
    }
2490

    
2491
    ret=-1;
2492
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2493
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2494
    }
2495
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2496
    if(ret<0) {
2497
        if (!display_disable)
2498
            is->show_audio = 2;
2499
    }
2500

    
2501
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2502
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2503
    }
2504

    
2505
    if (is->video_stream < 0 && is->audio_stream < 0) {
2506
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2507
        ret = -1;
2508
        goto fail;
2509
    }
2510

    
2511
    for(;;) {
2512
        if (is->abort_request)
2513
            break;
2514
        if (is->paused != is->last_paused) {
2515
            is->last_paused = is->paused;
2516
            if (is->paused)
2517
                is->read_pause_return= av_read_pause(ic);
2518
            else
2519
                av_read_play(ic);
2520
        }
2521
#if CONFIG_RTSP_DEMUXER
2522
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2523
            /* wait 10 ms to avoid trying to get another packet */
2524
            /* XXX: horrible */
2525
            SDL_Delay(10);
2526
            continue;
2527
        }
2528
#endif
2529
        if (is->seek_req) {
2530
            int64_t seek_target= is->seek_pos;
2531
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2532
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2533
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2534
//      of the seek_pos/seek_rel variables
2535

    
2536
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2537
            if (ret < 0) {
2538
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2539
            }else{
2540
                if (is->audio_stream >= 0) {
2541
                    packet_queue_flush(&is->audioq);
2542
                    packet_queue_put(&is->audioq, &flush_pkt);
2543
                }
2544
                if (is->subtitle_stream >= 0) {
2545
                    packet_queue_flush(&is->subtitleq);
2546
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2547
                }
2548
                if (is->video_stream >= 0) {
2549
                    packet_queue_flush(&is->videoq);
2550
                    packet_queue_put(&is->videoq, &flush_pkt);
2551
                }
2552
            }
2553
            is->seek_req = 0;
2554
            eof= 0;
2555
        }
2556

    
2557
        /* if the queue are full, no need to read more */
2558
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2559
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2560
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2561
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2562
            /* wait 10 ms */
2563
            SDL_Delay(10);
2564
            continue;
2565
        }
2566
        if(url_feof(ic->pb) || eof) {
2567
            if(is->video_stream >= 0){
2568
                av_init_packet(pkt);
2569
                pkt->data=NULL;
2570
                pkt->size=0;
2571
                pkt->stream_index= is->video_stream;
2572
                packet_queue_put(&is->videoq, pkt);
2573
            }
2574
            SDL_Delay(10);
2575
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2576
                if(loop!=1 && (!loop || --loop)){
2577
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2578
                }else if(autoexit){
2579
                    ret=AVERROR_EOF;
2580
                    goto fail;
2581
                }
2582
            }
2583
            continue;
2584
        }
2585
        ret = av_read_frame(ic, pkt);
2586
        if (ret < 0) {
2587
            if (ret == AVERROR_EOF)
2588
                eof=1;
2589
            if (url_ferror(ic->pb))
2590
                break;
2591
            SDL_Delay(100); /* wait for user event */
2592
            continue;
2593
        }
2594
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2595
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2596
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2597
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2598
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2599
                <= ((double)duration/1000000);
2600
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2601
            packet_queue_put(&is->audioq, pkt);
2602
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2603
            packet_queue_put(&is->videoq, pkt);
2604
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2605
            packet_queue_put(&is->subtitleq, pkt);
2606
        } else {
2607
            av_free_packet(pkt);
2608
        }
2609
    }
2610
    /* wait until the end */
2611
    while (!is->abort_request) {
2612
        SDL_Delay(100);
2613
    }
2614

    
2615
    ret = 0;
2616
 fail:
2617
    /* disable interrupting */
2618
    global_video_state = NULL;
2619

    
2620
    /* close each stream */
2621
    if (is->audio_stream >= 0)
2622
        stream_component_close(is, is->audio_stream);
2623
    if (is->video_stream >= 0)
2624
        stream_component_close(is, is->video_stream);
2625
    if (is->subtitle_stream >= 0)
2626
        stream_component_close(is, is->subtitle_stream);
2627
    if (is->ic) {
2628
        av_close_input_file(is->ic);
2629
        is->ic = NULL; /* safety */
2630
    }
2631
    url_set_interrupt_cb(NULL);
2632

    
2633
    if (ret != 0) {
2634
        SDL_Event event;
2635

    
2636
        event.type = FF_QUIT_EVENT;
2637
        event.user.data1 = is;
2638
        SDL_PushEvent(&event);
2639
    }
2640
    return 0;
2641
}
2642

    
2643
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2644
{
2645
    VideoState *is;
2646

    
2647
    is = av_mallocz(sizeof(VideoState));
2648
    if (!is)
2649
        return NULL;
2650
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2651
    is->iformat = iformat;
2652
    is->ytop = 0;
2653
    is->xleft = 0;
2654

    
2655
    /* start video display */
2656
    is->pictq_mutex = SDL_CreateMutex();
2657
    is->pictq_cond = SDL_CreateCond();
2658

    
2659
    is->subpq_mutex = SDL_CreateMutex();
2660
    is->subpq_cond = SDL_CreateCond();
2661

    
2662
    is->av_sync_type = av_sync_type;
2663
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2664
    if (!is->parse_tid) {
2665
        av_free(is);
2666
        return NULL;
2667
    }
2668
    return is;
2669
}
2670

    
2671
/**
 * Stop the demuxer and refresh threads and free everything owned by 'is',
 * including the picture queue overlays and the SDL synchronization objects.
 */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* release every entry of the picture queue */
    for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }

    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);

#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif

    av_free(is);
}
2704

    
2705
/**
 * Switch to the next usable stream of the given media type.
 * Scans forward from the currently open stream index, wrapping around,
 * and closes the old stream component before opening the new one.
 * For subtitles, running past the last stream disables subtitles
 * (index -1) instead of wrapping forever.
 */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* audio/video must currently be open to cycle; subtitles may also
     * start from the "disabled" state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* past the last subtitle stream: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return; /* came full circle without finding an alternative */
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* skip audio streams with no usable rate/channel info */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2753

    
2754

    
2755
/**
 * Toggle between windowed and fullscreen display.
 * The actual mode switch happens by reopening the video surface in
 * video_open(); the SDL_WM_ToggleFullScreen() path is intentionally
 * disabled (left commented out).
 */
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}
2764

    
2765
static void toggle_pause(void)
2766
{
2767
    if (cur_stream)
2768
        stream_pause(cur_stream);
2769
    step = 0;
2770
}
2771

    
2772
static void step_to_next_frame(void)
2773
{
2774
    if (cur_stream) {
2775
        /* if the stream is paused unpause it, then step */
2776
        if (cur_stream->paused)
2777
            stream_pause(cur_stream);
2778
    }
2779
    step = 1;
2780
}
2781

    
2782
static void do_exit(void)
2783
{
2784
    int i;
2785
    if (cur_stream) {
2786
        stream_close(cur_stream);
2787
        cur_stream = NULL;
2788
    }
2789
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2790
        av_free(avcodec_opts[i]);
2791
    av_free(avformat_opts);
2792
    av_free(sws_opts);
2793
#if CONFIG_AVFILTER
2794
    avfilter_uninit();
2795
#endif
2796
    if (show_status)
2797
        printf("\n");
2798
    SDL_Quit();
2799
    exit(0);
2800
}
2801

    
2802
static void toggle_audio_display(void)
2803
{
2804
    if (cur_stream) {
2805
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2806
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2807
        fill_rectangle(screen,
2808
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2809
                    bgcolor);
2810
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2811
    }
2812
}
2813

    
2814
/* handle an event sent by the GUI */
/**
 * Main SDL event loop: blocks on SDL_WaitEvent() forever, dispatching
 * keyboard shortcuts, mouse seeking, window resize and the custom
 * FF_*_EVENTs posted by the decoder/refresh threads.  Only returns via
 * do_exit(), which calls exit().
 */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys: relative seek; incr is in seconds (converted to
             * bytes below when seeking by bytes) */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* derive a byte position from the last known
                         * video/audio packet position, falling back to
                         * the raw I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the seconds increment to bytes using the
                         * container bitrate, or a fixed guess if unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fallthrough: a click is handled like a motion event below */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                /* only drag-seeking: ignore motion without a pressed button */
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* map the x coordinate to a fraction of the file */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread to (re)allocate the YUV overlay
             * on this (the main) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            /* NOTE(review): cur_stream is dereferenced unconditionally
             * here — if stream_open() ever failed this would crash;
             * confirm callers guarantee cur_stream != NULL */
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2960

    
2961
static void opt_frame_size(const char *arg)
2962
{
2963
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2964
        fprintf(stderr, "Incorrect frame size\n");
2965
        exit(1);
2966
    }
2967
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2968
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2969
        exit(1);
2970
    }
2971
}
2972

    
2973
/** "-x": force the displayed window width (pixels, >= 1). */
static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2978

    
2979
/** "-y": force the displayed window height (pixels, >= 1). */
static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}
2984

    
2985
static void opt_format(const char *arg)
2986
{
2987
    file_iformat = av_find_input_format(arg);
2988
    if (!file_iformat) {
2989
        fprintf(stderr, "Unknown input format: %s\n", arg);
2990
        exit(1);
2991
    }
2992
}
2993

    
2994
static void opt_frame_pix_fmt(const char *arg)
2995
{
2996
    frame_pix_fmt = av_get_pix_fmt(arg);
2997
}
2998

    
2999
static int opt_sync(const char *opt, const char *arg)
3000
{
3001
    if (!strcmp(arg, "audio"))
3002
        av_sync_type = AV_SYNC_AUDIO_MASTER;
3003
    else if (!strcmp(arg, "video"))
3004
        av_sync_type = AV_SYNC_VIDEO_MASTER;
3005
    else if (!strcmp(arg, "ext"))
3006
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
3007
    else {
3008
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
3009
        exit(1);
3010
    }
3011
    return 0;
3012
}
3013

    
3014
/** "-ss": start playback at the given position (time expression). */
static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}
3019

    
3020
/** "-t": limit playback to the given duration (time expression). */
static int opt_duration(const char *opt, const char *arg)
{
    duration = parse_time_or_die(opt, arg, 1);
    return 0;
}
3025

    
3026
/** "-debug": raise the log level to maximum and set codec debug flags. */
static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}
3032

    
3033
/** "-vismv": visualize motion vectors (value passed to the decoder). */
static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}
3038

    
3039
/** "-threads": set the decoder thread count (0 = default). */
static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    /* warn when the build cannot honor the request */
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}
3047

    
3048
/* Command line option table; flag semantics and the common entries come
 * from cmdutils (cmdutils_common_opts.h).  Terminated by a NULL entry. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3094

    
3095
/** Print the one-line usage summary to stdout. */
static void show_usage(void)
{
    fputs("Simple media player\n", stdout);
    fputs("usage: ffplay [options] input_file\n", stdout);
    fputs("\n", stdout);
}
3101

    
3102
static void show_help(void)
3103
{
3104
    show_usage();
3105
    show_help_options(options, "Main options:\n",
3106
                      OPT_EXPERT, 0);
3107
    show_help_options(options, "\nAdvanced options:\n",
3108
                      OPT_EXPERT, OPT_EXPERT);
3109
    printf("\nWhile playing:\n"
3110
           "q, ESC              quit\n"
3111
           "f                   toggle full screen\n"
3112
           "p, SPC              pause\n"
3113
           "a                   cycle audio channel\n"
3114
           "v                   cycle video channel\n"
3115
           "t                   cycle subtitle channel\n"
3116
           "w                   show audio waves\n"
3117
           "s                   activate frame-step mode\n"
3118
           "left/right          seek backward/forward 10 seconds\n"
3119
           "down/up             seek backward/forward 1 minute\n"
3120
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3121
           );
3122
}
3123

    
3124
/**
 * Record the input filename given on the command line.
 * "-" is translated to the "pipe:" protocol (read from stdin); a second
 * filename is rejected with an error.
 */
static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}
3135

    
3136
/* Called from the main */
3137
int main(int argc, char **argv)
3138
{
3139
    int flags, i;
3140

    
3141
    /* register all codecs, demux and protocols */
3142
    avcodec_register_all();
3143
#if CONFIG_AVDEVICE
3144
    avdevice_register_all();
3145
#endif
3146
#if CONFIG_AVFILTER
3147
    avfilter_register_all();
3148
#endif
3149
    av_register_all();
3150

    
3151
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
3152
        avcodec_opts[i]= avcodec_alloc_context2(i);
3153
    }
3154
    avformat_opts = avformat_alloc_context();
3155
#if !CONFIG_AVFILTER
3156
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3157
#endif
3158

    
3159
    show_banner();
3160

    
3161
    parse_options(argc, argv, options, opt_input_file);
3162

    
3163
    if (!input_filename) {
3164
        show_usage();
3165
        fprintf(stderr, "An input file must be specified\n");
3166
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3167
        exit(1);
3168
    }
3169

    
3170
    if (display_disable) {
3171
        video_disable = 1;
3172
    }
3173
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3174
#if !defined(__MINGW32__) && !defined(__APPLE__)
3175
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3176
#endif
3177
    if (SDL_Init (flags)) {
3178
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3179
        exit(1);
3180
    }
3181

    
3182
    if (!display_disable) {
3183
#if HAVE_SDL_VIDEO_SIZE
3184
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3185
        fs_screen_width = vi->current_w;
3186
        fs_screen_height = vi->current_h;
3187
#endif
3188
    }
3189

    
3190
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3191
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3192
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3193

    
3194
    av_init_packet(&flush_pkt);
3195
    flush_pkt.data= "FLUSH";
3196

    
3197
    cur_stream = stream_open(input_filename, file_iformat);
3198

    
3199
    event_loop();
3200

    
3201
    /* never returns */
3202

    
3203
    return 0;
3204
}