/*
 * ffmpeg / ffplay.c @ 12bd3c1f
 */

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <inttypes.h>
24
#include <math.h>
25
#include <limits.h>
26
#include "libavutil/avstring.h"
27
#include "libavutil/pixdesc.h"
28
#include "libavformat/avformat.h"
29
#include "libavdevice/avdevice.h"
30
#include "libswscale/swscale.h"
31
#include "libavcodec/audioconvert.h"
32
#include "libavcodec/colorspace.h"
33
#include "libavcodec/opt.h"
34
#include "libavcodec/avfft.h"
35

    
36
#if CONFIG_AVFILTER
37
# include "libavfilter/avfilter.h"
38
# include "libavfilter/avfiltergraph.h"
39
# include "libavfilter/graphparser.h"
40
#endif
41

    
42
#include "cmdutils.h"
43

    
44
#include <SDL.h>
45
#include <SDL_thread.h>
46

    
47
#ifdef __MINGW32__
48
#undef main /* We don't want SDL to override our main() */
49
#endif
50

    
51
#include <unistd.h>
52
#include <assert.h>
53

    
54
const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

/* upper bound on the total bytes buffered in the demuxed packet queues */
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
/* keep reading while the audio queue holds less than this many bytes */
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
/* minimum number of queued frames before playback is considered fed */
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* exponential factor used when accumulating the frame-skip estimate */
#define FRAME_SKIP_FACTOR 0.05

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

/* scaler algorithm used by swscale conversions (overridable option) */
static int sws_flags = SWS_BICUBIC;
84

    
85
/* Thread-safe FIFO of demuxed AVPackets, shared between the demuxer
 * thread (producer) and the decoder threads (consumers). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* singly linked list head/tail */
    int nb_packets;                     /* number of queued packets */
    int size;                           /* total bytes queued (payload + node overhead) */
    int abort_request;                  /* set to make blocked readers return */
    SDL_mutex *mutex;                   /* protects all fields above */
    SDL_cond *cond;                     /* signaled when a packet is added or abort is requested */
} PacketQueue;
93

    
94
/* depth of the decoded-picture and subtitle queues */
#define VIDEO_PICTURE_QUEUE_SIZE 2
#define SUBPICTURE_QUEUE_SIZE 4

/* One decoded video frame waiting to be displayed. */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            /* SDL YUV overlay holding the pixels, NULL until allocated */
    int width, height; /* source height & width */
    int allocated;                               /* nonzero once bmp has been allocated by the event loop */
    enum PixelFormat pix_fmt;                    /* pixel format of the source frame */

#if CONFIG_AVFILTER
    AVFilterPicRef *picref;                      /* filter-graph reference that owns the picture data */
#endif
} VideoPicture;
110

    
111
/* One decoded subtitle together with its display timestamp. */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle rectangles (owned; freed by free_subpicture) */
} SubPicture;
115

    
116
/* Which clock playback is slaved to (see the av_sync_type option). */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
121

    
122
/* Complete player state for one opened media file: demuxer, per-stream
 * decoders and queues, clocks, and display bookkeeping.  One instance
 * (cur_stream) is shared by all threads. */
typedef struct VideoState {
    SDL_Thread *parse_tid;    /* demuxer (read) thread */
    SDL_Thread *video_tid;    /* video decoder thread */
    SDL_Thread *refresh_tid;  /* thread that posts FF_REFRESH_EVENTs */
    AVInputFormat *iformat;   /* forced input format, if any */
    int no_background;
    int abort_request;        /* tells all threads to quit */
    int paused;               /* current pause state */
    int last_paused;          /* previous pause state, to detect transitions */
    int seek_req;             /* a seek has been requested */
    int seek_flags;           /* AVSEEK_FLAG_* for the pending seek */
    int64_t seek_pos;         /* seek target */
    int64_t seek_rel;         /* seek delta relative to current position */
    int read_pause_return;    /* value returned by av_read_pause() */
    AVFormatContext *ic;      /* demuxer context */
    int dtg_active_format;

    /* --- audio --- */
    int audio_stream;         /* index of the selected audio stream, or -1 */

    int av_sync_type;         /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;       /* pts of the last played audio sample */
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;    /* size of the SDL hardware audio buffer, bytes */
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;       /* points at audio_buf1 or audio_buf2 */
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;  /* remaining undecoded part of audio_pkt */
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx; /* sample-format converter, lazily created */

    /* --- audio visualization --- */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE]; /* ring buffer of recent samples */
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;        /* FFT context for the spectrum display */
    int rdft_bits;
    int xpos;                 /* current x column of the spectrum display */

    /* --- subtitles --- */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* --- video --- */
    double frame_timer;       /* time at which the next frame should be shown */
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx; /* scaler used to convert frames to YUV420P */
#endif

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop; /* display window geometry */

    /* timestamp fault detection counters */
    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;        /* frame-skip ratio estimate */
    float skip_frames_index;
    int refresh;              /* a refresh event is pending */
} VideoState;
218

    
219
static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static const char *window_title;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
/* user-requested stream index per media type; -1 means auto-select */
static int wanted_stream[AVMEDIA_TYPE_NB]={
    [AVMEDIA_TYPE_AUDIO]=-1,
    [AVMEDIA_TYPE_VIDEO]=-1,
    [AVMEDIA_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1; /* -1: decide automatically from the format */
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int64_t duration = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;
static int loop=1;
static int framedrop=1;

static int rdftspeed=20; /* spectrum refresh period, ms */
#if CONFIG_AVFILTER
static char *vfilters = NULL;
#endif

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time; /* av_gettime() at the start of the last SDL audio callback */

/* sentinel packet queued to make decoders flush their internal state */
static AVPacket flush_pkt;

/* custom SDL event codes used to communicate with the event loop */
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284

    
285
/* packet queue handling */
286
/* Initialize a packet queue and prime it with the shared flush packet so
 * the decoder starts from a clean state. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(*q));
    q->mutex = SDL_CreateMutex();
    q->cond  = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}
293

    
294
/* Drop every queued packet and reset the queue counters to empty. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->first_pkt  = NULL;
    q->last_pkt   = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
310

    
311
/* Destroy a queue: release all queued packets, then its SDL primitives. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
317

    
318
/* Append a packet to the queue and wake one waiting consumer.
 * The packet is duplicated (except the shared flush packet) so the
 * caller's buffer may be reused.  Returns 0 on success, -1 on failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* duplicate the packet; the flush sentinel is shared and must not be */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(*entry));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;
    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
349

    
350
/* Mark the queue aborted and wake any thread blocked in packet_queue_get(). */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);
    q->abort_request = 1;
    SDL_CondSignal(q->cond);
    SDL_UnlockMutex(q->mutex);
}
360

    
361
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362
/* Pop the next packet into *pkt.
 * Returns < 0 if the queue was aborted, 0 if empty in non-blocking mode,
 * and > 0 when a packet was dequeued.  In blocking mode, waits on the
 * queue's condition variable until a packet arrives or abort is set. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *head;
    int ret;

    SDL_LockMutex(q->mutex);

    while (1) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        head = q->first_pkt;
        if (head) {
            /* unlink the head node and update the accounting */
            q->first_pkt = head->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= head->pkt.size + sizeof(*head);
            *pkt = head->pkt;
            av_free(head);
            ret = 1;
            break;
        }
        if (!block) {
            ret = 0;
            break;
        }
        SDL_CondWait(q->cond, q->mutex);
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
396

    
397
/* Fill a solid axis-aligned rectangle on the given SDL surface. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area;

    area.x = x;
    area.y = y;
    area.w = w;
    area.h = h;
    SDL_FillRect(screen, &area, color);
}
407

    
408
#if 0
409
/* draw only the border of a rectangle */
410
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
411
{
412
    int w1, w2, h1, h2;
413

414
    /* fill the background */
415
    w1 = x;
416
    if (w1 < 0)
417
        w1 = 0;
418
    w2 = s->width - (x + w);
419
    if (w2 < 0)
420
        w2 = 0;
421
    h1 = y;
422
    if (h1 < 0)
423
        h1 = 0;
424
    h2 = s->height - (y + h);
425
    if (h2 < 0)
426
        h2 = 0;
427
    fill_rectangle(screen,
428
                   s->xleft, s->ytop,
429
                   w1, s->height,
430
                   color);
431
    fill_rectangle(screen,
432
                   s->xleft + s->width - w2, s->ytop,
433
                   w2, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + w1, s->ytop,
437
                   s->width - w1 - w2, h1,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop + s->height - h2,
441
                   s->width - w1 - w2, h2,
442
                   color);
443
}
444
#endif
445

    
446
/* Alpha-blend newp over oldp with alpha a; s is a fixed-point shift used
 * when oldp/newp carry accumulated (summed) samples. */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack an ARGB32 word at s into separate r, g, b, a components. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the index byte at s and unpack it into
 * y, u, v, a components (palette entries are packed AYUV words). */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a components into one AYUV word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per source pixel (palettized subtitles: one index byte) */
#define BPP 1
474

    
475
/* Alpha-blend one palettized subtitle rectangle onto a YUV 4:2:0 picture.
 * dst's chroma planes are at half resolution in both directions (see the
 * ">> 1" plane indexing below), so luma is blended per pixel while chroma
 * is blended over 1x1, 2x1, 1x2 or 2x2 luma groups depending on the
 * alignment of the rectangle edges.  The rectangle is clipped to
 * imgw x imgh first. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle to the destination image */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: chroma is shared with the row above, so blend it
       with half weight (a >> 2 on single samples, shift 1 on pairs) */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main body: process luma two rows at a time so each chroma sample is
       blended from the accumulated 2x2 (or edge 1x2/2x1) luma group */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* NOTE(review): this blends with the second pixel's u/v rather
               than the accumulated u1/v1 used everywhere else -- looks like
               a latent bug in the odd-height path; confirm against upstream
               before changing. */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
674

    
675
/* Release every rectangle owned by a SubPicture and reset it to empty. */
static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++) {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(sp->sub));
}
690

    
691
/* Display the picture at the read index of the picture queue: blend any
 * due subtitle into the overlay, compute a letterboxed destination
 * rectangle from the aspect ratio, and hand the overlay to SDL. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* pixel aspect comes from the filter-graph picture reference */
         if (vp->picref->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert pixel aspect into full display aspect */
        aspect_ratio *= (float)vp->width / (float)vp->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        /* blend the current subtitle (if its display time has come)
           directly into the YUV overlay */
        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note: SDL overlay planes are Y, V, U -- hence the
                       swapped [1]/[2] indices */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: fit the aspect-corrected frame inside the window,
           keeping even dimensions (& ~1) as YUV overlays require */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
812

    
813
/* Mathematical modulo: unlike C's %, the result is always non-negative
 * (in [0, b) for b > 0, even when a is negative). */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return r >= 0 ? r : r + b;
}
821

    
822
/* Draw the audio visualization: either an oscilloscope waveform per
 * channel (show_audio == 1) or an RDFT spectrum analyzer scrolling one
 * column per call.  Reads recent samples from s->sample_array, centering
 * the window on what the audio hardware is currently playing. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    /* FIX: was int16_t.  av_gettime() deltas are in microseconds and
       routinely exceed 32767, so an int16_t silently overflowed and
       corrupted the computed sample delay. */
    int64_t time_diff;
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, for the RDFT size */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels; /* bytes per sample frame (16-bit samples) */
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look for a zero crossing with maximal slope to stabilize
               the waveform horizontally between refreshes */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        /* keep the last frozen view while paused */
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        /* waveform mode: clear, then draw one scope line per channel */
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        /* spectrum mode: at most two channels are analyzed */
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            av_rdft_end(s->rdft);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window before the transform */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* advance the scrolling column, wrapping at the right edge */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
956

    
957
static int video_open(VideoState *is){
    /* Open (or re-open) the SDL output surface sized for the current
     * content, honouring full-screen mode and user-forced dimensions.
     * Returns 0 on success, -1 if SDL cannot set the video mode. */
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    /* pick the window size: forced sizes win, then the filter/stream
       geometry, then a 640x480 fallback */
    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the current surface already has the wanted size */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
/* display the current picture, if any */
1008
static void video_display(VideoState *is)
1009
{
1010
    if(!screen)
1011
        video_open(cur_stream);
1012
    if (is->audio_st && is->show_audio)
1013
        video_audio_display(is);
1014
    else if (is->video_st)
1015
        video_image_display(is);
1016
}
1017

    
1018
static int refresh_thread(void *opaque)
1019
{
1020
    VideoState *is= opaque;
1021
    while(!is->abort_request){
1022
    SDL_Event event;
1023
    event.type = FF_REFRESH_EVENT;
1024
    event.user.data1 = opaque;
1025
        if(!is->refresh){
1026
            is->refresh=1;
1027
    SDL_PushEvent(&event);
1028
        }
1029
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
1030
    }
1031
    return 0;
1032
}
1033

    
1034
/* get the current audio clock value */
1035
static double get_audio_clock(VideoState *is)
1036
{
1037
    double pts;
1038
    int hw_buf_size, bytes_per_sec;
1039
    pts = is->audio_clock;
1040
    hw_buf_size = audio_write_get_buf_size(is);
1041
    bytes_per_sec = 0;
1042
    if (is->audio_st) {
1043
        bytes_per_sec = is->audio_st->codec->sample_rate *
1044
            2 * is->audio_st->codec->channels;
1045
    }
1046
    if (bytes_per_sec)
1047
        pts -= (double)hw_buf_size / bytes_per_sec;
1048
    return pts;
1049
}
1050

    
1051
/* get the current video clock value */
1052
static double get_video_clock(VideoState *is)
1053
{
1054
    if (is->paused) {
1055
        return is->video_current_pts;
1056
    } else {
1057
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058
    }
1059
}
1060

    
1061
/* get the current external clock value */
1062
static double get_external_clock(VideoState *is)
1063
{
1064
    int64_t ti;
1065
    ti = av_gettime();
1066
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067
}
1068

    
1069
/* get the current master clock value */
1070
static double get_master_clock(VideoState *is)
1071
{
1072
    double val;
1073

    
1074
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075
        if (is->video_st)
1076
            val = get_video_clock(is);
1077
        else
1078
            val = get_audio_clock(is);
1079
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080
        if (is->audio_st)
1081
            val = get_audio_clock(is);
1082
        else
1083
            val = get_video_clock(is);
1084
    } else {
1085
        val = get_external_clock(is);
1086
    }
1087
    return val;
1088
}
1089

    
1090
/* seek in the stream */
1091
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092
{
1093
    if (!is->seek_req) {
1094
        is->seek_pos = pos;
1095
        is->seek_rel = rel;
1096
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097
        if (seek_by_bytes)
1098
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1099
        is->seek_req = 1;
1100
    }
1101
}
1102

    
1103
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance the frame timer by the wall-clock time spent
           paused so frame scheduling picks up where it left off */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            /* the demuxer really paused: re-derive the current pts from the drift */
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* refresh the drift so get_video_clock() stays continuous across the pause */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
/* Compute the absolute wall-clock time (is->frame_timer) at which the
 * picture with the given pts should be displayed, applying A/V sync
 * correction (skip / repeat) when video is not the master clock.
 *
 * @param frame_current_pts pts, in seconds, of the picture being queued
 * @return the updated target display time in seconds
 */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff = 0;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fixed: the previous printf referenced an undeclared 'actual_delay'
       (compile error with DEBUG_SYNC defined) and read 'diff' before
       initialization when video was the master clock */
    printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, frame_current_pts, -diff);
#endif

    return is->frame_timer;
}
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* too early: keep the picture queued and try again later */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following picture is due */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            /* frame dropping: if we are already past the next picture's due
               time, raise the skip factor and possibly drop this one */
            if(framedrop && time > next_target){
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole subtitle queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the current subtitle once it has expired or
                           the next one's display time has arrived */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        /* throttle the status line to at most one print every 30ms */
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previously allocated overlay before (re)allocating */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_pic(vp->picref);
    vp->picref = NULL;

    /* geometry comes from the filter chain output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    /* geometry comes straight from the decoder */
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);

    /* wake up queue_picture(), which is blocked waiting for the allocation */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
/**
 * Queue a decoded picture for display, converting it into the SDL YUV
 * overlay of the next write slot.  Blocks until a slot is free and, if
 * needed, until the main thread has (re)allocated the overlay.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full with no refresh pending: decay the frame-skip factor */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_pic(vp->picref);
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        /* SDL's YV12 overlay stores V before U, hence planes 1 and 2
           are swapped relative to the libav planar layout */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* colorspace/format conversion into the overlay via swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
/**
1455
 * compute the exact PTS for the picture if it is omitted in the stream
1456
 * @param pts1 the dts of the pkt / pts of the frame
1457
 */
1458
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1459
{
1460
    double frame_delay, pts;
1461

    
1462
    pts = pts1;
1463

    
1464
    if (pts != 0) {
1465
        /* update video clock with pts, if present */
1466
        is->video_clock = pts;
1467
    } else {
1468
        pts = is->video_clock;
1469
    }
1470
    /* update video clock for next frame */
1471
    frame_delay = av_q2d(is->video_st->codec->time_base);
1472
    /* for MPEG2, the frame can be repeated, so we update the
1473
       clock accordingly */
1474
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1475
    is->video_clock += frame_delay;
1476

    
1477
#if defined(DEBUG_SYNC) && 0
1478
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1479
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1480
#endif
1481
    return queue_picture(is, src_frame, pts, pos);
1482
}
1483

    
1484
/* Fetch one packet from the video queue and decode it.
 * Returns 1 when a displayable frame was produced, 0 to try again
 * (no frame yet, flush packet handled, or frame skipped), -1 on abort. */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    if(pkt->data == flush_pkt.data){
        /* a seek happened: reset the decoder and all timing state */
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
            is->pictq[i].target_clock= 0;
        }
        /* wait for the display side to drain the picture queue */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos= -1;
        SDL_UnlockMutex(is->pictq_mutex);

        is->last_dts_for_fault_detection=
        is->last_pts_for_fault_detection= INT64_MIN;
        is->frame_last_pts= AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames= 1;
        is->skip_frames_index= 0;
        return 0;
    }

    /* NOTE: ipts is the PTS of the _first_ picture beginning in
       this packet, if any */
    is->video_st->codec->reordered_opaque= pkt->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
                                frame, &got_picture,
                                pkt);

    /* count non-monotonic dts/pts values to decide later which
       timestamp source is more trustworthy */
    if (got_picture) {
        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }
    }

    /* pick the pts: decoder-reordered pts when forced or more reliable
       than dts, otherwise the packet dts, otherwise 0 */
    if(   (   decoder_reorder_pts==1
           || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
           || pkt->dts == AV_NOPTS_VALUE)
       && frame->reordered_opaque != AV_NOPTS_VALUE)
        *pts= frame->reordered_opaque;
    else if(pkt->dts != AV_NOPTS_VALUE)
        *pts= pkt->dts;
    else
        *pts= 0;

//            if (len1 < 0)
//                break;
    if (got_picture){
        /* frame skipping: deliver only one frame per skip_frames interval */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
#if CONFIG_AVFILTER
/* private context of the "ffplay_input" source filter */
typedef struct {
    VideoState *is;   /* owning player state */
    AVFrame *frame;   /* scratch frame filled by get_video_frame() */
    int use_dr1;      /* non-zero when direct rendering (DR1) is active */
} FilterPriv;
/* get_buffer() decoder callback: allocate the frame from the filter
 * graph so decoded pictures land directly in filter buffers (DR1). */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterPicRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    /* translate the decoder's buffer hints into filter permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    /* over-allocate: aligned dimensions plus an edge border on each side */
    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->w = codec->width;
    ref->h = codec->height;
    for(i = 0; i < 3; i ++) {
        unsigned hshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_w;
        unsigned vshift = i == 0 ? 0 : av_pix_fmt_descriptors[ref->pic->format].log2_chroma_h;

        /* advance past the edge border, scaled by the plane's chroma shift */
        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    return 0;
}
/* release_buffer() decoder callback: clear the frame's data pointers
 * and drop the filter picture reference backing it */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_pic(pic->opaque);
}
/* reget_buffer() decoder callback: reuse the existing filter buffer when
 * the frame geometry and format are unchanged; allocate fresh if empty. */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterPicRef *ref = pic->opaque;

    /* no buffer yet: fall back to a normal (readable) allocation */
    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->w) || (codec->height != ref->h) ||
        (codec->pix_fmt != ref->pic->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
/* init callback of the source filter: remember the player state and hook
 * our buffer callbacks into the video decoder when it supports direct
 * rendering (DR1).  opaque must be the VideoState pointer. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}
/* uninit callback of the source filter: free the scratch frame */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;

    av_free(fp->frame);
}
/* request_frame callback: decode until a displayable frame is produced,
 * then push it into the filter graph as a picture reference. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterPicRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* keep decoding (releasing each consumed packet) until a frame comes out */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the decoded frame already lives in a filter buffer, just ref it */
        picref = avfilter_ref_pic(priv->frame->opaque, ~0);
    } else {
        /* otherwise copy the frame data into a fresh filter buffer */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
                        picref->pic->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
/* query_formats callback: offer exactly the decoder's pixel format */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;
    enum PixelFormat formats[] = {
        fp->is->video_st->codec->pix_fmt,
        PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(formats));
    return 0;
}
/* config_props callback: the output link inherits the decoder geometry */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *fp = link->src->priv;
    AVCodecContext *avctx = fp->is->video_st->codec;

    link->w = avctx->width;
    link->h = avctx->height;
    return 0;
}
/* source filter: feeds decoded frames from the video thread into the graph */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* no input pads: this is a pure source */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
/* end_frame callback of the sink: intentionally empty — the picture is
 * pulled out of the input link's cur_pic by get_filtered_video_frame() */
static void output_end_frame(AVFilterLink *link)
{
}
/* query_formats callback of the sink: accept only YUV420P, matching the
 * SDL YV12 overlay used for display */
static int output_query_formats(AVFilterContext *ctx)
{
    enum PixelFormat accepted[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(accepted));
    return 0;
}
/* Pull one frame out of the filter graph sink.
 * On success returns 1 with frame/pts/pos filled in; frame->opaque takes
 * ownership of the picture reference.  Returns -1 when no frame is available. */
static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
                                    int64_t *pts, int64_t *pos)
{
    AVFilterPicRef *pic;

    if(avfilter_request_frame(ctx->inputs[0]))
        return -1;
    if(!(pic = ctx->inputs[0]->cur_pic))
        return -1;
    /* take ownership of the picture reference from the link */
    ctx->inputs[0]->cur_pic = NULL;

    frame->opaque = pic;
    *pts          = pic->pts;
    *pos          = pic->pos;

    memcpy(frame->data,     pic->data,     sizeof(frame->data));
    memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));

    return 1;
}
/* sink filter: the video thread pulls filtered frames from its input link */
static AVFilter output_filter =
{
    .name      = "ffplay_output",

    .query_formats = output_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name          = "default",
                                    .type          = AVMEDIA_TYPE_VIDEO,
                                    .end_frame     = output_end_frame,
                                    .min_perms     = AV_PERM_READ, },
                                  { .name = NULL }},
    /* no output pads: this is a pure sink */
    .outputs   = (AVFilterPad[]) {{ .name = NULL }},
};
#endif  /* CONFIG_AVFILTER */
1778

    
1779
/* Video decoding thread: pulls frames (optionally through a libavfilter
 * graph), converts their timestamps to seconds and queues them for display. */
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    /* build the graph: src ("ffplay_input") -> [user filters] -> out */
    int64_t pos;
    char sws_flags_str[128];
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
    if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;


    if(vfilters) {
        /* parse the user-specified filter chain between src and out */
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        /* no user filters: connect source directly to sink */
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
    if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
    if(avfilter_graph_config_links(graph, NULL))             goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        /* ret == 0: no displayable frame this round (flush/skip) */
        if (!ret)
            continue;

        /* convert the stream-timebase pts into seconds */
        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        /* single-step mode: pause again after each displayed frame */
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_destroy(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
/* Subtitle decoder thread.
 *
 * Pulls packets from is->subtitleq, decodes them and stores resulting
 * bitmap subtitles in the subpicture ring buffer (is->subpq), converting
 * each rect's palette from RGBA to YUVA so the video thread can blend
 * them onto the YUV overlay.  Always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* While playback is paused, idle instead of consuming packets. */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* Blocking get; returns < 0 when the queue was aborted. */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* A flush packet (queued after a seek) resets decoder state. */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* Wait for a free slot in the subpicture queue. */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* Only bitmap subtitles (format == 0) are queued for display. */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* Convert each rect's RGBA palette entries to YUVA in place. */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1947

    
1948
/* copy samples for viewing in editor window */
1949
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1950
{
1951
    int size, len, channels;
1952

    
1953
    channels = is->audio_st->codec->channels;
1954

    
1955
    size = samples_size / sizeof(short);
1956
    while (size > 0) {
1957
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1958
        if (len > size)
1959
            len = size;
1960
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1961
        samples += len;
1962
        is->sample_array_index += len;
1963
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1964
            is->sample_array_index = 0;
1965
        size -= len;
1966
    }
1967
}
1968

    
1969
/* return the new audio buffer size (samples can be added or deleted
1970
   to get better sync if video or external master clock) */
1971
static int synchronize_audio(VideoState *is, short *samples,
1972
                             int samples_size1, double pts)
1973
{
1974
    int n, samples_size;
1975
    double ref_clock;
1976

    
1977
    n = 2 * is->audio_st->codec->channels;
1978
    samples_size = samples_size1;
1979

    
1980
    /* if not master, then we try to remove or add samples to correct the clock */
1981
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1982
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1983
        double diff, avg_diff;
1984
        int wanted_size, min_size, max_size, nb_samples;
1985

    
1986
        ref_clock = get_master_clock(is);
1987
        diff = get_audio_clock(is) - ref_clock;
1988

    
1989
        if (diff < AV_NOSYNC_THRESHOLD) {
1990
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1991
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1992
                /* not enough measures to have a correct estimate */
1993
                is->audio_diff_avg_count++;
1994
            } else {
1995
                /* estimate the A-V difference */
1996
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1997

    
1998
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1999
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2000
                    nb_samples = samples_size / n;
2001

    
2002
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2003
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2004
                    if (wanted_size < min_size)
2005
                        wanted_size = min_size;
2006
                    else if (wanted_size > max_size)
2007
                        wanted_size = max_size;
2008

    
2009
                    /* add or remove samples to correction the synchro */
2010
                    if (wanted_size < samples_size) {
2011
                        /* remove samples */
2012
                        samples_size = wanted_size;
2013
                    } else if (wanted_size > samples_size) {
2014
                        uint8_t *samples_end, *q;
2015
                        int nb;
2016

    
2017
                        /* add samples */
2018
                        nb = (samples_size - wanted_size);
2019
                        samples_end = (uint8_t *)samples + samples_size - n;
2020
                        q = samples_end + n;
2021
                        while (nb > 0) {
2022
                            memcpy(q, samples_end, n);
2023
                            q += n;
2024
                            nb -= n;
2025
                        }
2026
                        samples_size = wanted_size;
2027
                    }
2028
                }
2029
#if 0
2030
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2031
                       diff, avg_diff, samples_size - samples_size1,
2032
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2033
#endif
2034
            }
2035
        } else {
2036
            /* too big difference : may be initial PTS errors, so
2037
               reset A-V filter */
2038
            is->audio_diff_avg_count = 0;
2039
            is->audio_diff_cum = 0;
2040
        }
2041
    }
2042

    
2043
    return samples_size;
2044
}
2045

    
2046
/* decode one audio frame and returns its uncompressed size */
/* Decodes the next chunk of audio into is->audio_buf (converting to S16
 * via av_audio_convert when the decoder's sample format differs), stores
 * its presentation time in *pts_ptr and advances is->audio_clock by the
 * decoded duration.  Returns the number of bytes available in
 * is->audio_buf, or -1 when paused or when the audio queue is aborted. */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;  /* cursor into the current packet */
    AVPacket *pkt = &is->audio_pkt;            /* packet currently owned by us */
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* Advance past the consumed bytes of the packet. */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (Re)create the converter when the source format changed. */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* Convert to S16 into audio_buf2, or use audio_buf1 directly. */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;             /* bytes per S16 sample frame */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* Flush packets (queued after a seek) reset the decoder. */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2148

    
2149
/* get the current audio output buffer size, in samples. With SDL, we
2150
   cannot have a precise information */
2151
static int audio_write_get_buf_size(VideoState *is)
2152
{
2153
    return is->audio_buf_size - is->audio_buf_index;
2154
}
2155

    
2156

    
2157
/* prepare a new audio buffer */
/* Audio callback registered with SDL_OpenAudio(): fills 'stream' with
 * 'len' bytes of playable audio.  Decodes frames on demand through
 * audio_decode_frame(), applies A/V-sync correction via
 * synchronize_audio(), and substitutes silence on decode errors so the
 * device keeps running. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* Record when SDL asked for data (used for clock estimation). */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* Refill our intermediate buffer once it is fully consumed. */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* Copy as much as fits into the SDL-provided buffer. */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2192

    
2193
/* open a given stream. Return 0 if OK */
/* Opens the decoder for ic->streams[stream_index], applies the
 * command-line decoding options (globals: lowres, fast, skip_*, etc.),
 * and starts the matching consumer: SDL audio device for audio,
 * video_thread for video, subtitle_thread for subtitles.
 * Returns 0 on success, -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* Ask the decoder for at most stereo output. */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    /* Propagate user-selected decoding options (command-line globals). */
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        /* 'spec' holds what the hardware actually granted. */
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);   /* start the callback */
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2292

    
2293
/* Tears down the component opened by stream_component_open(): aborts the
 * matching packet queue, unblocks and joins the consumer thread (or closes
 * the SDL audio device), then closes the codec and clears the VideoState
 * fields for that stream. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* Stops the SDL callback thread before freeing its data. */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* Codec teardown happens only after the consumer thread has exited. */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2364

    
2365
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
/* Points at the active VideoState while decode_thread() runs; read by
   decode_interrupt_cb() to abort blocking I/O, cleared on teardown. */
static VideoState *global_video_state;
2368

    
2369
static int decode_interrupt_cb(void)
2370
{
2371
    return (global_video_state && global_video_state->abort_request);
2372
}
2373

    
2374
/* this thread gets the stream from the disk or the network */
/* Demuxer thread.  Opens the input, picks the audio/video/subtitle
 * streams to play, spawns the component threads via
 * stream_component_open(), then loops reading packets and distributing
 * them to the per-stream queues, handling pause, seek requests, queue
 * back-pressure and EOF/looping.  On exit it closes all components and,
 * if it failed, pushes an FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];            /* chosen stream per media type, -1 = none */
    int st_count[AVMEDIA_TYPE_NB]={0};        /* streams seen so far per type */
    int st_best_packet_count[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* Allow blocking I/O to be aborted via is->abort_request. */
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* Demuxer hints from the command line (raw video, etc.). */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* Select one stream per media type: honour the user's wanted_stream
       index, otherwise prefer the stream with the most frames seen
       during probing. */
    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
            continue;
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (!audio_disable)
                st_index[AVMEDIA_TYPE_AUDIO] = i;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    /* No video: fall back to the audio waveform/spectrum display. */
    if(ret<0) {
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* Main demux loop. */
    for(;;) {
        if (is->abort_request)
            break;
        /* Propagate pause/play transitions to network demuxers. */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        /* Service a pending seek request, then flush all queues. */
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        /* At EOF: push an empty packet to drain the video decoder, then
           either loop from the start or exit if -autoexit was given. */
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    /* Tell the main loop we failed so it can quit. */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2634

    
2635
/* Allocates a VideoState for 'filename', creates its synchronization
 * primitives and starts the demuxer thread (decode_thread).
 * Returns NULL on allocation or thread-creation failure; on success the
 * caller owns the state and releases it with stream_close(). */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* FIX: also release the mutexes/condvars created above; the old
         * code leaked them on thread-creation failure. */
        SDL_DestroyMutex(is->pictq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->subpq_cond);
        av_free(is);
        return NULL;
    }
    return is;
}
2662

    
2663
/* Tear down a VideoState: signal the worker threads to stop, wait for them,
 * release every queued picture, and free the SDL synchronization objects. */
static void stream_close(VideoState *is)
{
    int idx;

    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* drop every entry of the picture queue */
    for (idx = 0; idx < VIDEO_PICTURE_QUEUE_SIZE; idx++) {
        VideoPicture *vp = &is->pictq[idx];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_pic(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
2696

    
2697
/* Switch playback to the next usable stream of the given media type.
 * Subtitles may cycle back to "no stream" (-1); audio/video keep scanning
 * until another suitable stream (or the starting one) is reached. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle from: -1 is only an acceptable start for subtitles */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for (;;) {
        stream_index++;
        if (stream_index >= is->ic->nb_streams) {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE) {
                stream_index = -1;  /* wrap to "subtitles off" */
                goto the_end;
            }
            stream_index = 0;
        }
        if (stream_index == start_index)
            return;  /* scanned everything, no alternative found */
        st = ic->streams[stream_index];
        if (st->codec->codec_type != codec_type)
            continue;
        /* check that parameters are OK */
        switch (codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (st->codec->sample_rate != 0 &&
                st->codec->channels != 0)
                goto the_end;
            break;
        case AVMEDIA_TYPE_VIDEO:
        case AVMEDIA_TYPE_SUBTITLE:
            goto the_end;
        default:
            break;
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2745

    
2746

    
2747
/* Toggle the global fullscreen flag and reopen the video surface so the
 * new mode takes effect on the next video_open(). */
static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}
2756

    
2757
static void toggle_pause(void)
2758
{
2759
    if (cur_stream)
2760
        stream_pause(cur_stream);
2761
    step = 0;
2762
}
2763

    
2764
static void step_to_next_frame(void)
2765
{
2766
    if (cur_stream) {
2767
        /* if the stream is paused unpause it, then step */
2768
        if (cur_stream->paused)
2769
            stream_pause(cur_stream);
2770
    }
2771
    step = 1;
2772
}
2773

    
2774
static void do_exit(void)
2775
{
2776
    int i;
2777
    if (cur_stream) {
2778
        stream_close(cur_stream);
2779
        cur_stream = NULL;
2780
    }
2781
    for (i = 0; i < AVMEDIA_TYPE_NB; i++)
2782
        av_free(avcodec_opts[i]);
2783
    av_free(avformat_opts);
2784
    av_free(sws_opts);
2785
#if CONFIG_AVFILTER
2786
    avfilter_uninit();
2787
#endif
2788
    if (show_status)
2789
        printf("\n");
2790
    SDL_Quit();
2791
    exit(0);
2792
}
2793

    
2794
static void toggle_audio_display(void)
2795
{
2796
    if (cur_stream) {
2797
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2798
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2799
        fill_rectangle(screen,
2800
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2801
                    bgcolor);
2802
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2803
    }
2804
}
2805

    
2806
/* handle an event sent by the GUI */
/* Main SDL event loop: keyboard shortcuts, mouse seeking, window resize and
 * the FF_* user events posted by the decoding/refresh machinery.
 * Never returns normally; exits via do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys all funnel into do_seek with a signed increment
             * in seconds */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* byte seeking: start from the best known byte
                         * position (video, then audio, then raw file pos) */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        /* convert the seconds increment into bytes using the
                         * stream bitrate, or a fallback rate if unknown */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            /* click (or drag with button held) seeks to the fraction of the
             * window width under the cursor */
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* posted by the video thread: (re)open the window and allocate
             * the SDL overlay on this (the main/video) thread */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2944

    
2945
static void opt_frame_size(const char *arg)
2946
{
2947
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2948
        fprintf(stderr, "Incorrect frame size\n");
2949
        exit(1);
2950
    }
2951
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2952
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2953
        exit(1);
2954
    }
2955
}
2956

    
2957
static int opt_width(const char *opt, const char *arg)
2958
{
2959
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2960
    return 0;
2961
}
2962

    
2963
static int opt_height(const char *opt, const char *arg)
2964
{
2965
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2966
    return 0;
2967
}
2968

    
2969
static void opt_format(const char *arg)
2970
{
2971
    file_iformat = av_find_input_format(arg);
2972
    if (!file_iformat) {
2973
        fprintf(stderr, "Unknown input format: %s\n", arg);
2974
        exit(1);
2975
    }
2976
}
2977

    
2978
static void opt_frame_pix_fmt(const char *arg)
2979
{
2980
    frame_pix_fmt = av_get_pix_fmt(arg);
2981
}
2982

    
2983
static int opt_sync(const char *opt, const char *arg)
2984
{
2985
    if (!strcmp(arg, "audio"))
2986
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2987
    else if (!strcmp(arg, "video"))
2988
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2989
    else if (!strcmp(arg, "ext"))
2990
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2991
    else {
2992
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2993
        exit(1);
2994
    }
2995
    return 0;
2996
}
2997

    
2998
static int opt_seek(const char *opt, const char *arg)
2999
{
3000
    start_time = parse_time_or_die(opt, arg, 1);
3001
    return 0;
3002
}
3003

    
3004
static int opt_duration(const char *opt, const char *arg)
3005
{
3006
    duration = parse_time_or_die(opt, arg, 1);
3007
    return 0;
3008
}
3009

    
3010
static int opt_debug(const char *opt, const char *arg)
3011
{
3012
    av_log_set_level(99);
3013
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3014
    return 0;
3015
}
3016

    
3017
static int opt_vismv(const char *opt, const char *arg)
3018
{
3019
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
3020
    return 0;
3021
}
3022

    
3023
static int opt_thread_count(const char *opt, const char *arg)
3024
{
3025
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3026
#if !HAVE_THREADS
3027
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3028
#endif
3029
    return 0;
3030
}
3031

    
3032
/* Command-line option table. OPT_FUNC2 entries dispatch to the opt_*
 * handlers above; the others write directly into the named globals. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    /* display / window */
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    /* stream selection */
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    /* playback range / input */
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    /* debugging / decoder tuning */
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    /* playback behaviour */
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
3076

    
3077
/* Print the one-line usage banner. */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3083

    
3084
/* Print usage, the option tables (main then expert) and the interactive
 * key bindings. */
static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3105

    
3106
static void opt_input_file(const char *filename)
3107
{
3108
    if (input_filename) {
3109
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3110
                filename, input_filename);
3111
        exit(1);
3112
    }
3113
    if (!strcmp(filename, "-"))
3114
        filename = "pipe:";
3115
    input_filename = filename;
3116
}
3117

    
3118
/* Called from the main */
3119
int main(int argc, char **argv)
3120
{
3121
    int flags, i;
3122

    
3123
    /* register all codecs, demux and protocols */
3124
    avcodec_register_all();
3125
#if CONFIG_AVDEVICE
3126
    avdevice_register_all();
3127
#endif
3128
#if CONFIG_AVFILTER
3129
    avfilter_register_all();
3130
#endif
3131
    av_register_all();
3132

    
3133
    for(i=0; i<AVMEDIA_TYPE_NB; i++){
3134
        avcodec_opts[i]= avcodec_alloc_context2(i);
3135
    }
3136
    avformat_opts = avformat_alloc_context();
3137
#if !CONFIG_AVFILTER
3138
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3139
#endif
3140

    
3141
    show_banner();
3142

    
3143
    parse_options(argc, argv, options, opt_input_file);
3144

    
3145
    if (!input_filename) {
3146
        show_usage();
3147
        fprintf(stderr, "An input file must be specified\n");
3148
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3149
        exit(1);
3150
    }
3151

    
3152
    if (display_disable) {
3153
        video_disable = 1;
3154
    }
3155
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3156
#if !defined(__MINGW32__) && !defined(__APPLE__)
3157
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3158
#endif
3159
    if (SDL_Init (flags)) {
3160
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3161
        exit(1);
3162
    }
3163

    
3164
    if (!display_disable) {
3165
#if HAVE_SDL_VIDEO_SIZE
3166
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3167
        fs_screen_width = vi->current_w;
3168
        fs_screen_height = vi->current_h;
3169
#endif
3170
    }
3171

    
3172
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3173
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3174
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3175

    
3176
    av_init_packet(&flush_pkt);
3177
    flush_pkt.data= "FLUSH";
3178

    
3179
    cur_stream = stream_open(input_filename, file_iformat);
3180

    
3181
    event_loop();
3182

    
3183
    /* never returns */
3184

    
3185
    return 0;
3186
}