ffmpeg / ffplay.c @ 112c4b87
1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavcore/imgutils.h"
32
#include "libavcore/parseutils.h"
33
#include "libavcore/samplefmt.h"
34
#include "libavformat/avformat.h"
35
#include "libavdevice/avdevice.h"
36
#include "libswscale/swscale.h"
37
#include "libavcodec/audioconvert.h"
38
#include "libavcodec/opt.h"
39
#include "libavcodec/avfft.h"
40

    
41
#if CONFIG_AVFILTER
42
# include "libavfilter/avfilter.h"
43
# include "libavfilter/avfiltergraph.h"
44
#endif
45

    
46
#include "cmdutils.h"
47

    
48
#include <SDL.h>
49
#include <SDL_thread.h>
50

    
51
#ifdef __MINGW32__
52
#undef main /* We don't want SDL to override our main() */
53
#endif
54

    
55
#include <unistd.h>
56
#include <assert.h>
57

    
58
const char program_name[] = "FFplay";
59
const int program_birth_year = 2003;
60

    
61
//#define DEBUG_SYNC
62

    
63
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
64
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
65
#define MIN_FRAMES 5
66

    
67
/* SDL audio buffer size, in samples. Should be small to have precise
68
   A/V sync as SDL does not have hardware buffer fullness info. */
69
#define SDL_AUDIO_BUFFER_SIZE 1024
70

    
71
/* no AV sync correction is done if below the AV sync threshold */
72
#define AV_SYNC_THRESHOLD 0.01
73
/* no AV correction is done if too big error */
74
#define AV_NOSYNC_THRESHOLD 10.0
75

    
76
#define FRAME_SKIP_FACTOR 0.05
77

    
78
/* maximum audio speed change to get correct sync */
79
#define SAMPLE_CORRECTION_PERCENT_MAX 10
80

    
81
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
82
#define AUDIO_DIFF_AVG_NB   20
83

    
84
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
85
#define SAMPLE_ARRAY_SIZE (2*65536)
86

    
87
static int sws_flags = SWS_BICUBIC;
88

    
89
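/* thread-safe FIFO of demuxed packets shared between the demux thread
   and the decoder threads; 'size' tracks the total number of bytes queued */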
typedef struct PacketQueue {
90
    AVPacketList *first_pkt, *last_pkt;
91
    int nb_packets;
92
    int size;
93
    int abort_request;
94
    SDL_mutex *mutex;
95
    SDL_cond *cond;
96
} PacketQueue;
97

    
98
#define VIDEO_PICTURE_QUEUE_SIZE 2
99
#define SUBPICTURE_QUEUE_SIZE 4
100

    
101
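/* a decoded picture waiting to be displayed, backed by an SDL YUV overlay */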
typedef struct VideoPicture {
102
    double pts;                                  ///<presentation time stamp for this picture
103
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
104
    int64_t pos;                                 ///<byte position in file
105
    SDL_Overlay *bmp;
106
    int width, height; /* source height & width */
107
    int allocated;
108
    enum PixelFormat pix_fmt;
109

    
110
#if CONFIG_AVFILTER
111
    AVFilterBufferRef *picref;
112
#endif
113
} VideoPicture;
114

    
115
typedef struct SubPicture {
116
    double pts; /* presentation time stamp for this picture */
117
    AVSubtitle sub;
118
} SubPicture;
119

    
120
enum {
121
    AV_SYNC_AUDIO_MASTER, /* default choice */
122
    AV_SYNC_VIDEO_MASTER,
123
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
124
};
125

    
126
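/* global player state: demuxer context, per-stream packet queues,
   decoder threads, picture/subtitle queues and the A/V clocks */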
typedef struct VideoState {
127
    SDL_Thread *parse_tid;
128
    SDL_Thread *video_tid;
129
    SDL_Thread *refresh_tid;
130
    AVInputFormat *iformat;
131
    int no_background;
132
    int abort_request;
133
    int paused;
134
    int last_paused;
135
    int seek_req;
136
    int seek_flags;
137
    int64_t seek_pos;
138
    int64_t seek_rel;
139
    int read_pause_return;
140
    AVFormatContext *ic;
141
    int dtg_active_format;
142

    
143
    int audio_stream;
144

    
145
    int av_sync_type;
146
    double external_clock; /* external clock base */
147
    int64_t external_clock_time;
148

    
149
    double audio_clock;
150
    double audio_diff_cum; /* used for AV difference average computation */
151
    double audio_diff_avg_coef;
152
    double audio_diff_threshold;
153
    int audio_diff_avg_count;
154
    AVStream *audio_st;
155
    PacketQueue audioq;
156
    int audio_hw_buf_size;
157
    /* samples output by the codec. we reserve more space for avsync
158
       compensation */
159
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
160
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161
    uint8_t *audio_buf;
162
    unsigned int audio_buf_size; /* in bytes */
163
    int audio_buf_index; /* in bytes */
164
    AVPacket audio_pkt_temp;
165
    AVPacket audio_pkt;
166
    enum AVSampleFormat audio_src_fmt;
167
    AVAudioConvert *reformat_ctx;
168

    
169
    int show_audio; /* if true, display audio samples */
170
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
171
    int sample_array_index;
172
    int last_i_start;
173
    RDFTContext *rdft;
174
    int rdft_bits;
175
    FFTSample *rdft_data;
176
    int xpos;
177

    
178
    SDL_Thread *subtitle_tid;
179
    int subtitle_stream;
180
    int subtitle_stream_changed;
181
    AVStream *subtitle_st;
182
    PacketQueue subtitleq;
183
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
184
    int subpq_size, subpq_rindex, subpq_windex;
185
    SDL_mutex *subpq_mutex;
186
    SDL_cond *subpq_cond;
187

    
188
    double frame_timer;
189
    double frame_last_pts;
190
    double frame_last_delay;
191
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
192
    int video_stream;
193
    AVStream *video_st;
194
    PacketQueue videoq;
195
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
196
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
197
    int64_t video_current_pos;                   ///<current displayed file pos
198
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
199
    int pictq_size, pictq_rindex, pictq_windex;
200
    SDL_mutex *pictq_mutex;
201
    SDL_cond *pictq_cond;
202
#if !CONFIG_AVFILTER
203
    struct SwsContext *img_convert_ctx;
204
#endif
205

    
206
    //    QETimer *video_timer;
207
    char filename[1024];
208
    int width, height, xleft, ytop;
209

    
210
    PtsCorrectionContext pts_ctx;
211

    
212
#if CONFIG_AVFILTER
213
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214
#endif
215

    
216
    float skip_frames;
217
    float skip_frames_index;
218
    int refresh;
219
} VideoState;
220

    
221
static void show_help(void);
222
static int audio_write_get_buf_size(VideoState *is);
223

    
224
/* options specified by the user */
225
static AVInputFormat *file_iformat;
226
static const char *input_filename;
227
static const char *window_title;
228
static int fs_screen_width;
229
static int fs_screen_height;
230
static int screen_width = 0;
231
static int screen_height = 0;
232
static int frame_width = 0;
233
static int frame_height = 0;
234
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235
static int audio_disable;
236
static int video_disable;
237
static int wanted_stream[AVMEDIA_TYPE_NB]={
238
    [AVMEDIA_TYPE_AUDIO]=-1,
239
    [AVMEDIA_TYPE_VIDEO]=-1,
240
    [AVMEDIA_TYPE_SUBTITLE]=-1,
241
};
242
static int seek_by_bytes=-1;
243
static int display_disable;
244
static int show_status = 1;
245
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246
static int64_t start_time = AV_NOPTS_VALUE;
247
static int64_t duration = AV_NOPTS_VALUE;
248
static int debug = 0;
249
static int debug_mv = 0;
250
static int step = 0;
251
static int thread_count = 1;
252
static int workaround_bugs = 1;
253
static int fast = 0;
254
static int genpts = 0;
255
static int lowres = 0;
256
static int idct = FF_IDCT_AUTO;
257
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
260
static int error_recognition = FF_ER_CAREFUL;
261
static int error_concealment = 3;
262
static int decoder_reorder_pts= -1;
263
static int autoexit;
264
static int exit_on_keydown;
265
static int exit_on_mousedown;
266
static int loop=1;
267
static int framedrop=1;
268

    
269
static int rdftspeed=20;
270
#if CONFIG_AVFILTER
271
static char *vfilters = NULL;
272
#endif
273

    
274
/* current context */
275
static int is_full_screen;
276
static VideoState *cur_stream;
277
static int64_t audio_callback_time;
278

    
279
static AVPacket flush_pkt;
280

    
281
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
282
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
283
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
284

    
285
static SDL_Surface *screen;
286

    
287
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
288

    
289
/* packet queue handling */
290
static void packet_queue_init(PacketQueue *q)
291
{
292
    memset(q, 0, sizeof(PacketQueue));
293
    q->mutex = SDL_CreateMutex();
294
    q->cond = SDL_CreateCond();
295
    packet_queue_put(q, &flush_pkt);
296
}
297

    
298
static void packet_queue_flush(PacketQueue *q)
299
{
300
    AVPacketList *pkt, *pkt1;
301

    
302
    SDL_LockMutex(q->mutex);
303
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
304
        pkt1 = pkt->next;
305
        av_free_packet(&pkt->pkt);
306
        av_freep(&pkt);
307
    }
308
    q->last_pkt = NULL;
309
    q->first_pkt = NULL;
310
    q->nb_packets = 0;
311
    q->size = 0;
312
    SDL_UnlockMutex(q->mutex);
313
}
314

    
315
static void packet_queue_end(PacketQueue *q)
316
{
317
    packet_queue_flush(q);
318
    SDL_DestroyMutex(q->mutex);
319
    SDL_DestroyCond(q->cond);
320
}
321

    
322
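/* duplicate the packet and append it to the queue, then wake up any
   thread blocked in packet_queue_get() */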
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
323
{
324
    AVPacketList *pkt1;
325

    
326
    /* duplicate the packet */
327
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
328
        return -1;
329

    
330
    pkt1 = av_malloc(sizeof(AVPacketList));
331
    if (!pkt1)
332
        return -1;
333
    pkt1->pkt = *pkt;
334
    pkt1->next = NULL;
335

    
336

    
337
    SDL_LockMutex(q->mutex);
338

    
339
    if (!q->last_pkt)
340

    
341
        q->first_pkt = pkt1;
342
    else
343
        q->last_pkt->next = pkt1;
344
    q->last_pkt = pkt1;
345
    q->nb_packets++;
346
    q->size += pkt1->pkt.size + sizeof(*pkt1);
347
    /* XXX: should duplicate packet data in DV case */
348
    SDL_CondSignal(q->cond);
349

    
350
    SDL_UnlockMutex(q->mutex);
351
    return 0;
352
}
353

    
354
static void packet_queue_abort(PacketQueue *q)
355
{
356
    SDL_LockMutex(q->mutex);
357

    
358
    q->abort_request = 1;
359

    
360
    SDL_CondSignal(q->cond);
361

    
362
    SDL_UnlockMutex(q->mutex);
363
}
364

    
365
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
366
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
367
{
368
    AVPacketList *pkt1;
369
    int ret;
370

    
371
    SDL_LockMutex(q->mutex);
372

    
373
    for(;;) {
374
        if (q->abort_request) {
375
            ret = -1;
376
            break;
377
        }
378

    
379
        pkt1 = q->first_pkt;
380
        if (pkt1) {
381
            q->first_pkt = pkt1->next;
382
            if (!q->first_pkt)
383
                q->last_pkt = NULL;
384
            q->nb_packets--;
385
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
386
            *pkt = pkt1->pkt;
387
            av_free(pkt1);
388
            ret = 1;
389
            break;
390
        } else if (!block) {
391
            ret = 0;
392
            break;
393
        } else {
394
            SDL_CondWait(q->cond, q->mutex);
395
        }
396
    }
397
    SDL_UnlockMutex(q->mutex);
398
    return ret;
399
}
400

    
401
static inline void fill_rectangle(SDL_Surface *screen,
402
                                  int x, int y, int w, int h, int color)
403
{
404
    SDL_Rect rect;
405
    rect.x = x;
406
    rect.y = y;
407
    rect.w = w;
408
    rect.h = h;
409
    SDL_FillRect(screen, &rect, color);
410
}
411

    
412
#if 0
413
/* draw only the border of a rectangle */
414
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
415
{
416
    int w1, w2, h1, h2;
417

418
    /* fill the background */
419
    w1 = x;
420
    if (w1 < 0)
421
        w1 = 0;
422
    w2 = s->width - (x + w);
423
    if (w2 < 0)
424
        w2 = 0;
425
    h1 = y;
426
    if (h1 < 0)
427
        h1 = 0;
428
    h2 = s->height - (y + h);
429
    if (h2 < 0)
430
        h2 = 0;
431
    fill_rectangle(screen,
432
                   s->xleft, s->ytop,
433
                   w1, s->height,
434
                   color);
435
    fill_rectangle(screen,
436
                   s->xleft + s->width - w2, s->ytop,
437
                   w2, s->height,
438
                   color);
439
    fill_rectangle(screen,
440
                   s->xleft + w1, s->ytop,
441
                   s->width - w1 - w2, h1,
442
                   color);
443
    fill_rectangle(screen,
444
                   s->xleft + w1, s->ytop + s->height - h2,
445
                   s->width - w1 - w2, h2,
446
                   color);
447
}
448
#endif
449

    
450
#define ALPHA_BLEND(a, oldp, newp, s)\
451
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
452

    
453
#define RGBA_IN(r, g, b, a, s)\
454
{\
455
    unsigned int v = ((const uint32_t *)(s))[0];\
456
    a = (v >> 24) & 0xff;\
457
    r = (v >> 16) & 0xff;\
458
    g = (v >> 8) & 0xff;\
459
    b = v & 0xff;\
460
}
461

    
462
#define YUVA_IN(y, u, v, a, s, pal)\
463
{\
464
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
465
    a = (val >> 24) & 0xff;\
466
    y = (val >> 16) & 0xff;\
467
    u = (val >> 8) & 0xff;\
468
    v = val & 0xff;\
469
}
470

    
471
#define YUVA_OUT(d, y, u, v, a)\
472
{\
473
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
474
}
475

    
476

    
477
#define BPP 1
478

    
479
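/* alpha-blend a paletted subtitle rectangle onto the YUV420P destination picture */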
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
480
{
481
    int wrap, wrap3, width2, skip2;
482
    int y, u, v, a, u1, v1, a1, w, h;
483
    uint8_t *lum, *cb, *cr;
484
    const uint8_t *p;
485
    const uint32_t *pal;
486
    int dstx, dsty, dstw, dsth;
487

    
488
    dstw = av_clip(rect->w, 0, imgw);
489
    dsth = av_clip(rect->h, 0, imgh);
490
    dstx = av_clip(rect->x, 0, imgw - dstw);
491
    dsty = av_clip(rect->y, 0, imgh - dsth);
492
    lum = dst->data[0] + dsty * dst->linesize[0];
493
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
494
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
495

    
496
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
497
    skip2 = dstx >> 1;
498
    wrap = dst->linesize[0];
499
    wrap3 = rect->pict.linesize[0];
500
    p = rect->pict.data[0];
501
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
502

    
503
    if (dsty & 1) {
504
        lum += dstx;
505
        cb += skip2;
506
        cr += skip2;
507

    
508
        if (dstx & 1) {
509
            YUVA_IN(y, u, v, a, p, pal);
510
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
511
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
512
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
513
            cb++;
514
            cr++;
515
            lum++;
516
            p += BPP;
517
        }
518
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
519
            YUVA_IN(y, u, v, a, p, pal);
520
            u1 = u;
521
            v1 = v;
522
            a1 = a;
523
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
524

    
525
            YUVA_IN(y, u, v, a, p + BPP, pal);
526
            u1 += u;
527
            v1 += v;
528
            a1 += a;
529
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
530
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
531
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
532
            cb++;
533
            cr++;
534
            p += 2 * BPP;
535
            lum += 2;
536
        }
537
        if (w) {
538
            YUVA_IN(y, u, v, a, p, pal);
539
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
540
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
541
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
542
            p++;
543
            lum++;
544
        }
545
        p += wrap3 - dstw * BPP;
546
        lum += wrap - dstw - dstx;
547
        cb += dst->linesize[1] - width2 - skip2;
548
        cr += dst->linesize[2] - width2 - skip2;
549
    }
550
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
551
        lum += dstx;
552
        cb += skip2;
553
        cr += skip2;
554

    
555
        if (dstx & 1) {
556
            YUVA_IN(y, u, v, a, p, pal);
557
            u1 = u;
558
            v1 = v;
559
            a1 = a;
560
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561
            p += wrap3;
562
            lum += wrap;
563
            YUVA_IN(y, u, v, a, p, pal);
564
            u1 += u;
565
            v1 += v;
566
            a1 += a;
567
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
568
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
569
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
570
            cb++;
571
            cr++;
572
            p += -wrap3 + BPP;
573
            lum += -wrap + 1;
574
        }
575
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
576
            YUVA_IN(y, u, v, a, p, pal);
577
            u1 = u;
578
            v1 = v;
579
            a1 = a;
580
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
581

    
582
            YUVA_IN(y, u, v, a, p + BPP, pal);
583
            u1 += u;
584
            v1 += v;
585
            a1 += a;
586
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
587
            p += wrap3;
588
            lum += wrap;
589

    
590
            YUVA_IN(y, u, v, a, p, pal);
591
            u1 += u;
592
            v1 += v;
593
            a1 += a;
594
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595

    
596
            YUVA_IN(y, u, v, a, p + BPP, pal);
597
            u1 += u;
598
            v1 += v;
599
            a1 += a;
600
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
601

    
602
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
603
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
604

    
605
            cb++;
606
            cr++;
607
            p += -wrap3 + 2 * BPP;
608
            lum += -wrap + 2;
609
        }
610
        if (w) {
611
            YUVA_IN(y, u, v, a, p, pal);
612
            u1 = u;
613
            v1 = v;
614
            a1 = a;
615
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616
            p += wrap3;
617
            lum += wrap;
618
            YUVA_IN(y, u, v, a, p, pal);
619
            u1 += u;
620
            v1 += v;
621
            a1 += a;
622
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
623
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
624
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
625
            cb++;
626
            cr++;
627
            p += -wrap3 + BPP;
628
            lum += -wrap + 1;
629
        }
630
        p += wrap3 + (wrap3 - dstw * BPP);
631
        lum += wrap + (wrap - dstw - dstx);
632
        cb += dst->linesize[1] - width2 - skip2;
633
        cr += dst->linesize[2] - width2 - skip2;
634
    }
635
    /* handle odd height */
636
    if (h) {
637
        lum += dstx;
638
        cb += skip2;
639
        cr += skip2;
640

    
641
        if (dstx & 1) {
642
            YUVA_IN(y, u, v, a, p, pal);
643
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
644
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
645
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
646
            cb++;
647
            cr++;
648
            lum++;
649
            p += BPP;
650
        }
651
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
652
            YUVA_IN(y, u, v, a, p, pal);
653
            u1 = u;
654
            v1 = v;
655
            a1 = a;
656
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
657

    
658
            YUVA_IN(y, u, v, a, p + BPP, pal);
659
            u1 += u;
660
            v1 += v;
661
            a1 += a;
662
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
663
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
664
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
665
            cb++;
666
            cr++;
667
            p += 2 * BPP;
668
            lum += 2;
669
        }
670
        if (w) {
671
            YUVA_IN(y, u, v, a, p, pal);
672
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
673
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
674
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
675
        }
676
    }
677
}
678

    
679
static void free_subpicture(SubPicture *sp)
680
{
681
    avsubtitle_free(&sp->sub);
682
}
683

    
684
static void video_image_display(VideoState *is)
685
{
686
    VideoPicture *vp;
687
    SubPicture *sp;
688
    AVPicture pict;
689
    float aspect_ratio;
690
    int width, height, x, y;
691
    SDL_Rect rect;
692
    int i;
693

    
694
    vp = &is->pictq[is->pictq_rindex];
695
    if (vp->bmp) {
696
#if CONFIG_AVFILTER
697
         if (vp->picref->video->pixel_aspect.num == 0)
698
             aspect_ratio = 0;
699
         else
700
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
701
#else
702

    
703
        /* XXX: use variable in the frame */
704
        if (is->video_st->sample_aspect_ratio.num)
705
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
706
        else if (is->video_st->codec->sample_aspect_ratio.num)
707
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
708
        else
709
            aspect_ratio = 0;
710
#endif
711
        if (aspect_ratio <= 0.0)
712
            aspect_ratio = 1.0;
713
        aspect_ratio *= (float)vp->width / (float)vp->height;
714
        /* if an active format is indicated, then it overrides the
715
           mpeg format */
716
#if 0
717
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
718
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
719
            printf("dtg_active_format=%d\n", is->dtg_active_format);
720
        }
721
#endif
722
#if 0
723
        switch(is->video_st->codec->dtg_active_format) {
724
        case FF_DTG_AFD_SAME:
725
        default:
726
            /* nothing to do */
727
            break;
728
        case FF_DTG_AFD_4_3:
729
            aspect_ratio = 4.0 / 3.0;
730
            break;
731
        case FF_DTG_AFD_16_9:
732
            aspect_ratio = 16.0 / 9.0;
733
            break;
734
        case FF_DTG_AFD_14_9:
735
            aspect_ratio = 14.0 / 9.0;
736
            break;
737
        case FF_DTG_AFD_4_3_SP_14_9:
738
            aspect_ratio = 14.0 / 9.0;
739
            break;
740
        case FF_DTG_AFD_16_9_SP_14_9:
741
            aspect_ratio = 14.0 / 9.0;
742
            break;
743
        case FF_DTG_AFD_SP_4_3:
744
            aspect_ratio = 4.0 / 3.0;
745
            break;
746
        }
747
#endif
748

    
749
        if (is->subtitle_st)
750
        {
751
            if (is->subpq_size > 0)
752
            {
753
                sp = &is->subpq[is->subpq_rindex];
754

    
755
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
756
                {
757
                    SDL_LockYUVOverlay (vp->bmp);
758

    
759
                    pict.data[0] = vp->bmp->pixels[0];
760
                    pict.data[1] = vp->bmp->pixels[2];
761
                    pict.data[2] = vp->bmp->pixels[1];
762

    
763
                    pict.linesize[0] = vp->bmp->pitches[0];
764
                    pict.linesize[1] = vp->bmp->pitches[2];
765
                    pict.linesize[2] = vp->bmp->pitches[1];
766

    
767
                    for (i = 0; i < sp->sub.num_rects; i++)
768
                        blend_subrect(&pict, sp->sub.rects[i],
769
                                      vp->bmp->w, vp->bmp->h);
770

    
771
                    SDL_UnlockYUVOverlay (vp->bmp);
772
                }
773
            }
774
        }
775

    
776

    
777
        /* XXX: we assume the screen has a 1.0 pixel aspect ratio (square pixels) */
778
        height = is->height;
779
        width = ((int)rint(height * aspect_ratio)) & ~1;
780
        if (width > is->width) {
781
            width = is->width;
782
            height = ((int)rint(width / aspect_ratio)) & ~1;
783
        }
784
        x = (is->width - width) / 2;
785
        y = (is->height - height) / 2;
786
        if (!is->no_background) {
787
            /* fill the background */
788
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
789
        } else {
790
            is->no_background = 0;
791
        }
792
        rect.x = is->xleft + x;
793
        rect.y = is->ytop  + y;
794
        rect.w = width;
795
        rect.h = height;
796
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
797
    } else {
798
#if 0
799
        fill_rectangle(screen,
800
                       is->xleft, is->ytop, is->width, is->height,
801
                       QERGB(0x00, 0x00, 0x00));
802
#endif
803
    }
804
}
805

    
806
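/* modulo that always returns a result in [0, b), even for negative a */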
static inline int compute_mod(int a, int b)
807
{
808
    a = a % b;
809
    if (a >= 0)
810
        return a;
811
    else
812
        return a + b;
813
}
814

    
815
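/* draw either a per-channel waveform (show_audio == 1) or an RDFT
   spectrogram of the most recently played audio samples */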
static void video_audio_display(VideoState *s)
816
{
817
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
818
    int ch, channels, h, h2, bgcolor, fgcolor;
819
    int64_t time_diff;
820
    int rdft_bits, nb_freq;
821

    
822
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
823
        ;
824
    nb_freq= 1<<(rdft_bits-1);
825

    
826
    /* compute display index: center on the currently output samples */
827
    channels = s->audio_st->codec->channels;
828
    nb_display_channels = channels;
829
    if (!s->paused) {
830
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
831
        n = 2 * channels;
832
        delay = audio_write_get_buf_size(s);
833
        delay /= n;
834

    
835
        /* to be more precise, we take into account the time spent since
836
           the last buffer computation */
837
        if (audio_callback_time) {
838
            time_diff = av_gettime() - audio_callback_time;
839
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
840
        }
841

    
842
        delay += 2*data_used;
843
        if (delay < data_used)
844
            delay = data_used;
845

    
846
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
847
        if(s->show_audio==1){
848
            h= INT_MIN;
849
            for(i=0; i<1000; i+=channels){
850
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
851
                int a= s->sample_array[idx];
852
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
853
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
854
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
855
                int score= a-d;
856
                if(h<score && (b^c)<0){
857
                    h= score;
858
                    i_start= idx;
859
                }
860
            }
861
        }
862

    
863
        s->last_i_start = i_start;
864
    } else {
865
        i_start = s->last_i_start;
866
    }
867

    
868
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
869
    if(s->show_audio==1){
870
        fill_rectangle(screen,
871
                       s->xleft, s->ytop, s->width, s->height,
872
                       bgcolor);
873

    
874
        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
875

    
876
        /* total height for one channel */
877
        h = s->height / nb_display_channels;
878
        /* graph height / 2 */
879
        h2 = (h * 9) / 20;
880
        for(ch = 0;ch < nb_display_channels; ch++) {
881
            i = i_start + ch;
882
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
883
            for(x = 0; x < s->width; x++) {
884
                y = (s->sample_array[i] * h2) >> 15;
885
                if (y < 0) {
886
                    y = -y;
887
                    ys = y1 - y;
888
                } else {
889
                    ys = y1;
890
                }
891
                fill_rectangle(screen,
892
                               s->xleft + x, ys, 1, y,
893
                               fgcolor);
894
                i += channels;
895
                if (i >= SAMPLE_ARRAY_SIZE)
896
                    i -= SAMPLE_ARRAY_SIZE;
897
            }
898
        }
899

    
900
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
901

    
902
        for(ch = 1;ch < nb_display_channels; ch++) {
903
            y = s->ytop + ch * h;
904
            fill_rectangle(screen,
905
                           s->xleft, y, s->width, 1,
906
                           fgcolor);
907
        }
908
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
909
    }else{
910
        nb_display_channels= FFMIN(nb_display_channels, 2);
911
        if(rdft_bits != s->rdft_bits){
912
            av_rdft_end(s->rdft);
913
            av_free(s->rdft_data);
914
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
915
            s->rdft_bits= rdft_bits;
916
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
917
        }
918
        {
919
            FFTSample *data[2];
920
            for(ch = 0;ch < nb_display_channels; ch++) {
921
                data[ch] = s->rdft_data + 2*nb_freq*ch;
922
                i = i_start + ch;
923
                for(x = 0; x < 2*nb_freq; x++) {
924
                    double w= (x-nb_freq)*(1.0/nb_freq);
925
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
926
                    i += channels;
927
                    if (i >= SAMPLE_ARRAY_SIZE)
928
                        i -= SAMPLE_ARRAY_SIZE;
929
                }
930
                av_rdft_calc(s->rdft, data[ch]);
931
            }
932
            // least efficient way to do this; we should of course access it directly, but it's more than fast enough
933
            for(y=0; y<s->height; y++){
934
                double w= 1/sqrt(nb_freq);
935
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
936
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
937
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
938
                a= FFMIN(a,255);
939
                b= FFMIN(b,255);
940
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
941

    
942
                fill_rectangle(screen,
943
                            s->xpos, s->height-y, 1, 1,
944
                            fgcolor);
945
            }
946
        }
947
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948
        s->xpos++;
949
        if(s->xpos >= s->width)
950
            s->xpos= s->xleft;
951
    }
952
}
953

    
954
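/* (re)open the SDL video surface at the size implied by the current mode
   (fullscreen, forced size, codec/filter output size, or 640x480 fallback) */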
static int video_open(VideoState *is){
955
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
956
    int w,h;
957

    
958
    if(is_full_screen) flags |= SDL_FULLSCREEN;
959
    else               flags |= SDL_RESIZABLE;
960

    
961
    if (is_full_screen && fs_screen_width) {
962
        w = fs_screen_width;
963
        h = fs_screen_height;
964
    } else if(!is_full_screen && screen_width){
965
        w = screen_width;
966
        h = screen_height;
967
#if CONFIG_AVFILTER
968
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
969
        w = is->out_video_filter->inputs[0]->w;
970
        h = is->out_video_filter->inputs[0]->h;
971
#else
972
    }else if (is->video_st && is->video_st->codec->width){
973
        w = is->video_st->codec->width;
974
        h = is->video_st->codec->height;
975
#endif
976
    } else {
977
        w = 640;
978
        h = 480;
979
    }
980
    if(screen && is->width == screen->w && screen->w == w
981
       && is->height== screen->h && screen->h == h)
982
        return 0;
983

    
984
#ifndef __APPLE__
985
    screen = SDL_SetVideoMode(w, h, 0, flags);
986
#else
987
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
988
    screen = SDL_SetVideoMode(w, h, 24, flags);
989
#endif
990
    if (!screen) {
991
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
992
        return -1;
993
    }
994
    if (!window_title)
995
        window_title = input_filename;
996
    SDL_WM_SetCaption(window_title, window_title);
997

    
998
    is->width = screen->w;
999
    is->height = screen->h;
1000

    
1001
    return 0;
1002
}
1003

    
1004
/* display the current picture, if any */
1005
static void video_display(VideoState *is)
1006
{
1007
    if(!screen)
1008
        video_open(cur_stream);
1009
    if (is->audio_st && is->show_audio)
1010
        video_audio_display(is);
1011
    else if (is->video_st)
1012
        video_image_display(is);
1013
}
1014

    
1015
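/* periodically push an FF_REFRESH_EVENT so the event loop redraws the video */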
static int refresh_thread(void *opaque)
1016
{
1017
    VideoState *is= opaque;
1018
    while(!is->abort_request){
1019
        SDL_Event event;
1020
        event.type = FF_REFRESH_EVENT;
1021
        event.user.data1 = opaque;
1022
        if(!is->refresh){
1023
            is->refresh=1;
1024
            SDL_PushEvent(&event);
1025
        }
1026
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
1027
    }
1028
    return 0;
1029
}
1030

    
1031
/* get the current audio clock value */
1032
static double get_audio_clock(VideoState *is)
1033
{
1034
    double pts;
1035
    int hw_buf_size, bytes_per_sec;
1036
    pts = is->audio_clock;
1037
    hw_buf_size = audio_write_get_buf_size(is);
1038
    bytes_per_sec = 0;
1039
    if (is->audio_st) {
1040
        bytes_per_sec = is->audio_st->codec->sample_rate *
1041
            2 * is->audio_st->codec->channels;
1042
    }
1043
    if (bytes_per_sec)
1044
        pts -= (double)hw_buf_size / bytes_per_sec;
1045
    return pts;
1046
}
1047

    
1048
/* get the current video clock value */
1049
static double get_video_clock(VideoState *is)
1050
{
1051
    if (is->paused) {
1052
        return is->video_current_pts;
1053
    } else {
1054
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1055
    }
1056
}
1057

    
1058
/* get the current external clock value */
1059
static double get_external_clock(VideoState *is)
1060
{
1061
    int64_t ti;
1062
    ti = av_gettime();
1063
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1064
}
1065

    
1066
/* get the current master clock value */
1067
static double get_master_clock(VideoState *is)
1068
{
1069
    double val;
1070

    
1071
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1072
        if (is->video_st)
1073
            val = get_video_clock(is);
1074
        else
1075
            val = get_audio_clock(is);
1076
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1077
        if (is->audio_st)
1078
            val = get_audio_clock(is);
1079
        else
1080
            val = get_video_clock(is);
1081
    } else {
1082
        val = get_external_clock(is);
1083
    }
1084
    return val;
1085
}
1086

    
1087
/* seek in the stream */
1088
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1089
{
1090
    if (!is->seek_req) {
1091
        is->seek_pos = pos;
1092
        is->seek_rel = rel;
1093
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1094
        if (seek_by_bytes)
1095
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1096
        is->seek_req = 1;
1097
    }
1098
}
1099

    
1100
/* pause or resume the video */
1101
static void stream_pause(VideoState *is)
1102
{
1103
    if (is->paused) {
1104
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1105
        if(is->read_pause_return != AVERROR(ENOSYS)){
1106
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1107
        }
1108
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1109
    }
1110
    is->paused = !is->paused;
1111
}
1112

    
1113
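/* compute the absolute time at which the given frame should be displayed,
   advancing frame_timer by the frame delay corrected against the master clock */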
static double compute_target_time(double frame_current_pts, VideoState *is)
1114
{
1115
    double delay, sync_threshold, diff;
1116

    
1117
    /* compute nominal delay */
1118
    delay = frame_current_pts - is->frame_last_pts;
1119
    if (delay <= 0 || delay >= 10.0) {
1120
        /* if incorrect delay, use previous one */
1121
        delay = is->frame_last_delay;
1122
    } else {
1123
        is->frame_last_delay = delay;
1124
    }
1125
    is->frame_last_pts = frame_current_pts;
1126

    
1127
    /* update delay to follow master synchronisation source */
1128
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1129
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1130
        /* if video is slave, we try to correct big delays by
1131
           duplicating or deleting a frame */
1132
        diff = get_video_clock(is) - get_master_clock(is);
1133

    
1134
        /* skip or repeat frame. We take into account the
1135
           delay to compute the threshold. I still don't know
1136
           if it is the best guess */
1137
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1138
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1139
            if (diff <= -sync_threshold)
1140
                delay = 0;
1141
            else if (diff >= sync_threshold)
1142
                delay = 2 * delay;
1143
        }
1144
    }
1145
    is->frame_timer += delay;
1146
#if defined(DEBUG_SYNC)
1147
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1148
            delay, actual_delay, frame_current_pts, -diff);
1149
#endif
1150

    
1151
    return is->frame_timer;
1152
}
1153

    
1154
/* called to display each frame */
1155
static void video_refresh_timer(void *opaque)
1156
{
1157
    VideoState *is = opaque;
1158
    VideoPicture *vp;
1159

    
1160
    SubPicture *sp, *sp2;
1161

    
1162
    if (is->video_st) {
1163
retry:
1164
        if (is->pictq_size == 0) {
1165
            // nothing to do, no picture to display in the queue
1166
        } else {
1167
            double time= av_gettime()/1000000.0;
1168
            double next_target;
1169
            /* dequeue the picture */
1170
            vp = &is->pictq[is->pictq_rindex];
1171

    
1172
            if(time < vp->target_clock)
1173
                return;
1174
            /* update current video pts */
1175
            is->video_current_pts = vp->pts;
1176
            is->video_current_pts_drift = is->video_current_pts - time;
1177
            is->video_current_pos = vp->pos;
1178
            if(is->pictq_size > 1){
1179
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1180
                assert(nextvp->target_clock >= vp->target_clock);
1181
                next_target= nextvp->target_clock;
1182
            }else{
1183
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1184
            }
1185
            if(framedrop && time > next_target){
1186
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1187
                if(is->pictq_size > 1 || time > next_target + 0.5){
1188
                    /* update queue size and signal for next picture */
1189
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1190
                        is->pictq_rindex = 0;
1191

    
1192
                    SDL_LockMutex(is->pictq_mutex);
1193
                    is->pictq_size--;
1194
                    SDL_CondSignal(is->pictq_cond);
1195
                    SDL_UnlockMutex(is->pictq_mutex);
1196
                    goto retry;
1197
                }
1198
            }
1199

    
1200
            if(is->subtitle_st) {
1201
                if (is->subtitle_stream_changed) {
1202
                    SDL_LockMutex(is->subpq_mutex);
1203

    
1204
                    while (is->subpq_size) {
1205
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1206

    
1207
                        /* update queue size and signal for next picture */
1208
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1209
                            is->subpq_rindex = 0;
1210

    
1211
                        is->subpq_size--;
1212
                    }
1213
                    is->subtitle_stream_changed = 0;
1214

    
1215
                    SDL_CondSignal(is->subpq_cond);
1216
                    SDL_UnlockMutex(is->subpq_mutex);
1217
                } else {
1218
                    if (is->subpq_size > 0) {
1219
                        sp = &is->subpq[is->subpq_rindex];
1220

    
1221
                        if (is->subpq_size > 1)
1222
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1223
                        else
1224
                            sp2 = NULL;
1225

    
1226
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1227
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1228
                        {
1229
                            free_subpicture(sp);
1230

    
1231
                            /* update queue size and signal for next picture */
1232
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1233
                                is->subpq_rindex = 0;
1234

    
1235
                            SDL_LockMutex(is->subpq_mutex);
1236
                            is->subpq_size--;
1237
                            SDL_CondSignal(is->subpq_cond);
1238
                            SDL_UnlockMutex(is->subpq_mutex);
1239
                        }
1240
                    }
1241
                }
1242
            }
1243

    
1244
            /* display picture */
1245
            if (!display_disable)
1246
                video_display(is);
1247

    
1248
            /* update queue size and signal for next picture */
1249
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1250
                is->pictq_rindex = 0;
1251

    
1252
            SDL_LockMutex(is->pictq_mutex);
1253
            is->pictq_size--;
1254
            SDL_CondSignal(is->pictq_cond);
1255
            SDL_UnlockMutex(is->pictq_mutex);
1256
        }
1257
    } else if (is->audio_st) {
1258
        /* draw the next audio frame */
1259

    
1260
        /* if only audio stream, then display the audio bars (better
1261
           than nothing, just to test the implementation) */
1262

    
1263
        /* display picture */
1264
        if (!display_disable)
1265
            video_display(is);
1266
    }
1267
    if (show_status) {
1268
        static int64_t last_time;
1269
        int64_t cur_time;
1270
        int aqsize, vqsize, sqsize;
1271
        double av_diff;
1272

    
1273
        cur_time = av_gettime();
1274
        if (!last_time || (cur_time - last_time) >= 30000) {
1275
            aqsize = 0;
1276
            vqsize = 0;
1277
            sqsize = 0;
1278
            if (is->audio_st)
1279
                aqsize = is->audioq.size;
1280
            if (is->video_st)
1281
                vqsize = is->videoq.size;
1282
            if (is->subtitle_st)
1283
                sqsize = is->subtitleq.size;
1284
            av_diff = 0;
1285
            if (is->audio_st && is->video_st)
1286
                av_diff = get_audio_clock(is) - get_video_clock(is);
1287
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1288
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1289
            fflush(stdout);
1290
            last_time = cur_time;
1291
        }
1292
    }
1293
}
1294

    
1295
static void stream_close(VideoState *is)
1296
{
1297
    VideoPicture *vp;
1298
    int i;
1299
    /* XXX: use a special url_shutdown call to abort parse cleanly */
1300
    is->abort_request = 1;
1301
    SDL_WaitThread(is->parse_tid, NULL);
1302
    SDL_WaitThread(is->refresh_tid, NULL);
1303

    
1304
    /* free all pictures */
1305
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1306
        vp = &is->pictq[i];
1307
#if CONFIG_AVFILTER
1308
        if (vp->picref) {
1309
            avfilter_unref_buffer(vp->picref);
1310
            vp->picref = NULL;
1311
        }
1312
#endif
1313
        if (vp->bmp) {
1314
            SDL_FreeYUVOverlay(vp->bmp);
1315
            vp->bmp = NULL;
1316
        }
1317
    }
1318
    SDL_DestroyMutex(is->pictq_mutex);
1319
    SDL_DestroyCond(is->pictq_cond);
1320
    SDL_DestroyMutex(is->subpq_mutex);
1321
    SDL_DestroyCond(is->subpq_cond);
1322
#if !CONFIG_AVFILTER
1323
    if (is->img_convert_ctx)
1324
        sws_freeContext(is->img_convert_ctx);
1325
#endif
1326
    av_free(is);
1327
}
1328

    
1329
static void do_exit(void)
1330
{
1331
    if (cur_stream) {
1332
        stream_close(cur_stream);
1333
        cur_stream = NULL;
1334
    }
1335
    uninit_opts();
1336
#if CONFIG_AVFILTER
1337
    avfilter_uninit();
1338
#endif
1339
    if (show_status)
1340
        printf("\n");
1341
    SDL_Quit();
1342
    av_log(NULL, AV_LOG_QUIET, "");
1343
    exit(0);
1344
}
1345

    
1346
/* allocate a picture (this needs to be done in the main thread to avoid
1347
   potential locking problems) */
1348
static void alloc_picture(void *opaque)
1349
{
1350
    VideoState *is = opaque;
1351
    VideoPicture *vp;
1352

    
1353
    vp = &is->pictq[is->pictq_windex];
1354

    
1355
    if (vp->bmp)
1356
        SDL_FreeYUVOverlay(vp->bmp);
1357

    
1358
#if CONFIG_AVFILTER
1359
    if (vp->picref)
1360
        avfilter_unref_buffer(vp->picref);
1361
    vp->picref = NULL;
1362

    
1363
    vp->width   = is->out_video_filter->inputs[0]->w;
1364
    vp->height  = is->out_video_filter->inputs[0]->h;
1365
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1366
#else
1367
    vp->width   = is->video_st->codec->width;
1368
    vp->height  = is->video_st->codec->height;
1369
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1370
#endif
1371

    
1372
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1373
                                   SDL_YV12_OVERLAY,
1374
                                   screen);
1375
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1376
        /* SDL allocates a buffer smaller than requested if the video
1377
         * overlay hardware is unable to support the requested size. */
1378
        fprintf(stderr, "Error: the video system does not support an image\n"
1379
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1380
                        "to reduce the image size.\n", vp->width, vp->height );
1381
        do_exit();
1382
    }
1383

    
1384
    SDL_LockMutex(is->pictq_mutex);
1385
    vp->allocated = 1;
1386
    SDL_CondSignal(is->pictq_cond);
1387
    SDL_UnlockMutex(is->pictq_mutex);
1388
}
1389

    
1390
/**
1391
 *
1392
 * @param pts the dts of the pkt / pts of the frame, guessed if not known
1393
 */
1394
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1395
{
1396
    VideoPicture *vp;
1397
    int dst_pix_fmt;
1398
#if CONFIG_AVFILTER
1399
    AVPicture pict_src;
1400
#endif
1401
    /* wait until we have space to put a new picture */
1402
    SDL_LockMutex(is->pictq_mutex);
1403

    
1404
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1405
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1406

    
1407
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1408
           !is->videoq.abort_request) {
1409
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1410
    }
1411
    SDL_UnlockMutex(is->pictq_mutex);
1412

    
1413
    if (is->videoq.abort_request)
1414
        return -1;
1415

    
1416
    vp = &is->pictq[is->pictq_windex];
1417

    
1418
    /* alloc or resize hardware picture buffer */
1419
    if (!vp->bmp ||
1420
#if CONFIG_AVFILTER
1421
        vp->width  != is->out_video_filter->inputs[0]->w ||
1422
        vp->height != is->out_video_filter->inputs[0]->h) {
1423
#else
1424
        vp->width != is->video_st->codec->width ||
1425
        vp->height != is->video_st->codec->height) {
1426
#endif
1427
        SDL_Event event;
1428

    
1429
        vp->allocated = 0;
1430

    
1431
        /* the allocation must be done in the main thread to avoid
1432
           locking problems */
1433
        event.type = FF_ALLOC_EVENT;
1434
        event.user.data1 = is;
1435
        SDL_PushEvent(&event);
1436

    
1437
        /* wait until the picture is allocated */
1438
        SDL_LockMutex(is->pictq_mutex);
1439
        while (!vp->allocated && !is->videoq.abort_request) {
1440
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1441
        }
1442
        SDL_UnlockMutex(is->pictq_mutex);
1443

    
1444
        if (is->videoq.abort_request)
1445
            return -1;
1446
    }
1447

    
1448
    /* if the frame is not skipped, then display it */
1449
    if (vp->bmp) {
1450
        AVPicture pict;
1451
#if CONFIG_AVFILTER
1452
        if(vp->picref)
1453
            avfilter_unref_buffer(vp->picref);
1454
        vp->picref = src_frame->opaque;
1455
#endif
1456

    
1457
        /* get a pointer on the bitmap */
1458
        SDL_LockYUVOverlay (vp->bmp);
1459

    
1460
        dst_pix_fmt = PIX_FMT_YUV420P;
1461
        memset(&pict,0,sizeof(AVPicture));
1462
        pict.data[0] = vp->bmp->pixels[0];
1463
        pict.data[1] = vp->bmp->pixels[2];
1464
        pict.data[2] = vp->bmp->pixels[1];
1465

    
1466
        pict.linesize[0] = vp->bmp->pitches[0];
1467
        pict.linesize[1] = vp->bmp->pitches[2];
1468
        pict.linesize[2] = vp->bmp->pitches[1];
1469

    
1470
#if CONFIG_AVFILTER
1471
        pict_src.data[0] = src_frame->data[0];
1472
        pict_src.data[1] = src_frame->data[1];
1473
        pict_src.data[2] = src_frame->data[2];
1474

    
1475
        pict_src.linesize[0] = src_frame->linesize[0];
1476
        pict_src.linesize[1] = src_frame->linesize[1];
1477
        pict_src.linesize[2] = src_frame->linesize[2];
1478

    
1479
        //FIXME use direct rendering
1480
        av_picture_copy(&pict, &pict_src,
1481
                        vp->pix_fmt, vp->width, vp->height);
1482
#else
1483
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1484
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1485
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1486
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1487
        if (is->img_convert_ctx == NULL) {
1488
            fprintf(stderr, "Cannot initialize the conversion context\n");
1489
            exit(1);
1490
        }
1491
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1492
                  0, vp->height, pict.data, pict.linesize);
1493
#endif
1494
        /* update the bitmap content */
1495
        SDL_UnlockYUVOverlay(vp->bmp);
1496

    
1497
        vp->pts = pts;
1498
        vp->pos = pos;
1499

    
1500
        /* now we can update the picture count */
1501
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1502
            is->pictq_windex = 0;
1503
        SDL_LockMutex(is->pictq_mutex);
1504
        vp->target_clock= compute_target_time(vp->pts, is);
1505

    
1506
        is->pictq_size++;
1507
        SDL_UnlockMutex(is->pictq_mutex);
1508
    }
1509
    return 0;
1510
}
1511

    
1512
/**
1513
 * compute the exact PTS for the picture if it is omitted in the stream
1514
 * @param pts1 the dts of the pkt / pts of the frame
1515
 */
1516
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1517
{
1518
    double frame_delay, pts;
1519

    
1520
    pts = pts1;
1521

    
1522
    if (pts != 0) {
1523
        /* update video clock with pts, if present */
1524
        is->video_clock = pts;
1525
    } else {
1526
        pts = is->video_clock;
1527
    }
1528
    /* update video clock for next frame */
1529
    frame_delay = av_q2d(is->video_st->codec->time_base);
1530
    /* for MPEG2, the frame can be repeated, so we update the
1531
       clock accordingly */
1532
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1533
    is->video_clock += frame_delay;
1534

    
1535
#if defined(DEBUG_SYNC) && 0
1536
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1537
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1538
#endif
1539
    return queue_picture(is, src_frame, pts, pos);
1540
}
1541

    
1542
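/* fetch one packet from the video queue and decode it; returns 1 when a
   frame is ready to be queued, 0 if it was consumed or skipped, -1 on abort */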
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1543
{
1544
    int len1, got_picture, i;
1545

    
1546
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1547
            return -1;
1548

    
1549
        if(pkt->data == flush_pkt.data){
1550
            avcodec_flush_buffers(is->video_st->codec);
1551

    
1552
            SDL_LockMutex(is->pictq_mutex);
1553
            // Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1554
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1555
                is->pictq[i].target_clock= 0;
1556
            }
1557
            while (is->pictq_size && !is->videoq.abort_request) {
1558
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1559
            }
1560
            is->video_current_pos= -1;
1561
            SDL_UnlockMutex(is->pictq_mutex);
1562

    
1563
            init_pts_correction(&is->pts_ctx);
1564
            is->frame_last_pts= AV_NOPTS_VALUE;
1565
            is->frame_last_delay = 0;
1566
            is->frame_timer = (double)av_gettime() / 1000000.0;
1567
            is->skip_frames= 1;
1568
            is->skip_frames_index= 0;
1569
            return 0;
1570
        }
1571

    
1572
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1573
           this packet, if any */
1574
        is->video_st->codec->reordered_opaque= pkt->pts;
1575
        len1 = avcodec_decode_video2(is->video_st->codec,
1576
                                    frame, &got_picture,
1577
                                    pkt);
1578

    
1579
        if (got_picture) {
1580
            if (decoder_reorder_pts == -1) {
1581
                *pts = guess_correct_pts(&is->pts_ctx, frame->reordered_opaque, pkt->dts);
1582
            } else if (decoder_reorder_pts) {
1583
                *pts = frame->reordered_opaque;
1584
            } else {
1585
                *pts = pkt->dts;
1586
            }
1587

    
1588
            if (*pts == AV_NOPTS_VALUE) {
1589
                *pts = 0;
1590
            }
1591
        }
1592

    
1593
//            if (len1 < 0)
1594
//                break;
1595
    if (got_picture){
1596
        is->skip_frames_index += 1;
1597
        if(is->skip_frames_index >= is->skip_frames){
1598
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1599
            return 1;
1600
        }
1601

    
1602
    }
1603
    return 0;
1604
}
1605

    
1606
#if CONFIG_AVFILTER
1607
typedef struct {
1608
    VideoState *is;
1609
    AVFrame *frame;
1610
    int use_dr1;
1611
} FilterPriv;
1612

    
1613
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1614
{
1615
    AVFilterContext *ctx = codec->opaque;
1616
    AVFilterBufferRef  *ref;
1617
    int perms = AV_PERM_WRITE;
1618
    int i, w, h, stride[4];
1619
    unsigned edge;
1620

    
1621
    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1622
        perms |= AV_PERM_NEG_LINESIZES;
1623

    
1624
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1625
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1626
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1627
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1628
    }
1629
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1630

    
1631
    w = codec->width;
1632
    h = codec->height;
1633
    avcodec_align_dimensions2(codec, &w, &h, stride);
1634
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1635
    w += edge << 1;
1636
    h += edge << 1;
1637

    
1638
    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1639
        return -1;
1640

    
1641
    ref->video->w = codec->width;
1642
    ref->video->h = codec->height;
1643
    for(i = 0; i < 4; i ++) {
1644
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1645
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1646

    
1647
        if (ref->data[i]) {
1648
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1649
        }
1650
        pic->data[i]     = ref->data[i];
1651
        pic->linesize[i] = ref->linesize[i];
1652
    }
1653
    pic->opaque = ref;
1654
    pic->age    = INT_MAX;
1655
    pic->type   = FF_BUFFER_TYPE_USER;
1656
    pic->reordered_opaque = codec->reordered_opaque;
1657
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1658
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1659
    return 0;
1660
}
1661

    
1662
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1663
{
1664
    memset(pic->data, 0, sizeof(pic->data));
1665
    avfilter_unref_buffer(pic->opaque);
1666
}
1667

    
1668
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1669
{
1670
    AVFilterBufferRef *ref = pic->opaque;
1671

    
1672
    if (pic->data[0] == NULL) {
1673
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1674
        return codec->get_buffer(codec, pic);
1675
    }
1676

    
1677
    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1678
        (codec->pix_fmt != ref->format)) {
1679
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1680
        return -1;
1681
    }
1682

    
1683
    pic->reordered_opaque = codec->reordered_opaque;
1684
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1685
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1686
    return 0;
1687
}
1688

    
1689
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1690
{
1691
    FilterPriv *priv = ctx->priv;
1692
    AVCodecContext *codec;
1693
    if(!opaque) return -1;
1694

    
1695
    priv->is = opaque;
1696
    codec    = priv->is->video_st->codec;
1697
    codec->opaque = ctx;
1698
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
1699
        priv->use_dr1 = 1;
1700
        codec->get_buffer     = input_get_buffer;
1701
        codec->release_buffer = input_release_buffer;
1702
        codec->reget_buffer   = input_reget_buffer;
1703
    }
1704

    
1705
    priv->frame = avcodec_alloc_frame();
1706

    
1707
    return 0;
1708
}
1709

    
1710
static void input_uninit(AVFilterContext *ctx)
1711
{
1712
    FilterPriv *priv = ctx->priv;
1713
    av_free(priv->frame);
1714
}
1715

    
1716
static int input_request_frame(AVFilterLink *link)
1717
{
1718
    FilterPriv *priv = link->src->priv;
1719
    AVFilterBufferRef *picref;
1720
    int64_t pts = 0;
1721
    AVPacket pkt;
1722
    int ret;
1723

    
1724
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1725
        av_free_packet(&pkt);
1726
    if (ret < 0)
1727
        return -1;
1728

    
1729
    if(priv->use_dr1) {
1730
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1731
    } else {
1732
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1733
        av_image_copy(picref->data, picref->linesize,
1734
                      priv->frame->data, priv->frame->linesize,
1735
                      picref->format, link->w, link->h);
1736
    }
1737
    av_free_packet(&pkt);
1738

    
1739
    picref->pts = pts;
1740
    picref->pos = pkt.pos;
1741
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1742
    avfilter_start_frame(link, picref);
1743
    avfilter_draw_slice(link, 0, link->h, 1);
1744
    avfilter_end_frame(link);
1745

    
1746
    return 0;
1747
}
1748

    
1749
static int input_query_formats(AVFilterContext *ctx)
1750
{
1751
    FilterPriv *priv = ctx->priv;
1752
    enum PixelFormat pix_fmts[] = {
1753
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1754
    };
1755

    
1756
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1757
    return 0;
1758
}
1759

    
1760
static int input_config_props(AVFilterLink *link)
1761
{
1762
    FilterPriv *priv  = link->src->priv;
1763
    AVCodecContext *c = priv->is->video_st->codec;
1764

    
1765
    link->w = c->width;
1766
    link->h = c->height;
1767
    link->time_base = priv->is->video_st->time_base;
1768

    
1769
    return 0;
1770
}
1771

    
1772
static AVFilter input_filter =
1773
{
1774
    .name      = "ffplay_input",
1775

    
1776
    .priv_size = sizeof(FilterPriv),
1777

    
1778
    .init      = input_init,
1779
    .uninit    = input_uninit,
1780

    
1781
    .query_formats = input_query_formats,
1782

    
1783
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1784
    .outputs   = (AVFilterPad[]) {{ .name = "default",
1785
                                    .type = AVMEDIA_TYPE_VIDEO,
1786
                                    .request_frame = input_request_frame,
1787
                                    .config_props  = input_config_props, },
1788
                                  { .name = NULL }},
1789
};
1790

    
1791
#endif  /* CONFIG_AVFILTER */
1792

    
1793
static int video_thread(void *arg)
1794
{
1795
    VideoState *is = arg;
1796
    AVFrame *frame= avcodec_alloc_frame();
1797
    int64_t pts_int;
1798
    double pts;
1799
    int ret;
1800

    
1801
#if CONFIG_AVFILTER
1802
    int64_t pos;
1803
    char sws_flags_str[128];
1804
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1805
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1806
    AVFilterGraph *graph = avfilter_graph_alloc();
1807
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1808
    graph->scale_sws_opts = av_strdup(sws_flags_str);
1809

    
1810
    if (avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1811
                                     NULL, is, graph) < 0)
1812
        goto the_end;
1813
    if (avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1814
                                     NULL, &ffsink_ctx, graph) < 0)
1815
        goto the_end;
1816

    
1817
    if(vfilters) {
1818
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1819
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1820

    
1821
        outputs->name    = av_strdup("in");
1822
        outputs->filter_ctx = filt_src;
1823
        outputs->pad_idx = 0;
1824
        outputs->next    = NULL;
1825

    
1826
        inputs->name    = av_strdup("out");
1827
        inputs->filter_ctx = filt_out;
1828
        inputs->pad_idx = 0;
1829
        inputs->next    = NULL;
1830

    
1831
        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1832
            goto the_end;
1833
        av_freep(&vfilters);
1834
    } else {
1835
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1836
    }
1837

    
1838
    if (avfilter_graph_config(graph, NULL) < 0)
1839
        goto the_end;
1840

    
1841
    is->out_video_filter = filt_out;
1842
#endif
1843

    
1844
    for(;;) {
1845
#if !CONFIG_AVFILTER
1846
        AVPacket pkt;
1847
#else
1848
        AVFilterBufferRef *picref;
1849
        AVRational tb;
1850
#endif
1851
        while (is->paused && !is->videoq.abort_request)
1852
            SDL_Delay(10);
1853
#if CONFIG_AVFILTER
1854
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1855
        if (picref) {
1856
            pts_int = picref->pts;
1857
            pos     = picref->pos;
1858
            frame->opaque = picref;
1859
        }
1860

    
1861
        if (av_cmp_q(tb, is->video_st->time_base)) {
1862
            int64_t pts1 = pts_int;
1863
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1864
            av_log(NULL, AV_LOG_DEBUG, "video_thread(): "
1865
                   "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1866
                   tb.num, tb.den, pts1,
1867
                   is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1868
        }
1869
#else
1870
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1871
#endif
1872

    
1873
        if (ret < 0) goto the_end;
1874

    
1875
        if (!ret)
1876
            continue;
1877

    
1878
        pts = pts_int*av_q2d(is->video_st->time_base);
1879

    
1880
#if CONFIG_AVFILTER
1881
        ret = output_picture2(is, frame, pts, pos);
1882
#else
1883
        ret = output_picture2(is, frame, pts,  pkt.pos);
1884
        av_free_packet(&pkt);
1885
#endif
1886
        if (ret < 0)
1887
            goto the_end;
1888

    
1889
        if (step)
1890
            if (cur_stream)
1891
                stream_pause(cur_stream);
1892
    }
1893
 the_end:
1894
#if CONFIG_AVFILTER
1895
    avfilter_graph_free(graph);
1896
    av_freep(&graph);
1897
#endif
1898
    av_free(frame);
1899
    return 0;
1900
}
1901

    
1902
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: pkt->pts is the PTS of the _first_ subtitle beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
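/* Note on the RGBA loop above: bitmap subtitles (sub.format == 0) carry
 * their palette in pict.data[1] as RGBA entries; converting each palette
 * entry to YUVA once here lets the subtitle be blended later in YUV
 * space without a per-pixel colorspace conversion. */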
/* copy samples for viewing in the wave/spectrum display window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}
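/* sample_array is used as a circular buffer of SAMPLE_ARRAY_SIZE shorts:
 * the copy above wraps sample_array_index back to 0 when it reaches the
 * end, so the display code always sees the most recent audio samples. */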
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or the external clock is the master) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the final one */
                        nb = wanted_size - samples_size;
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
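/* Correction bounds, worked through for 44.1 kHz stereo S16 audio:
 * n = 2 * 2 = 4 bytes per sample frame, so a 4096-byte buffer holds
 * 1024 frames.  With SAMPLE_CORRECTION_PERCENT_MAX == 10 the clamped
 * wanted_size stays within [3684, 4504] bytes, i.e. at most about 10%
 * of the buffer is dropped or duplicated per callback. */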
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* derive the pts from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts, if present */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
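/* Audio clock arithmetic: each decoded chunk advances audio_clock by
 * data_size / (n * sample_rate) seconds, with n = 2 * channels bytes
 * per sample frame.  For example, 4096 bytes of stereo S16 at 44.1 kHz
 * advance the clock by 4096 / (4 * 44100) ~= 23.2 ms. */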
/* get the amount of decoded audio data still queued for output, in
   bytes. With SDL we cannot get precise hardware buffer fullness
   information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
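/* audio_buf_size and audio_buf_index are both byte counts maintained by
 * sdl_audio_callback(); their difference is the part of the last decoded
 * frame that has not yet been copied into SDL's stream buffer. */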
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
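/* On a decode error the callback feeds SDL 1024 bytes of silence: for
 * stereo S16 that is 256 sample frames, roughly 5.8 ms at 44.1 kHz, so
 * the audio device keeps running and the callback is invoked again
 * shortly afterwards. */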
2221

    
2222
/* open a given stream. Return 0 if OK */
2223
static int stream_component_open(VideoState *is, int stream_index)
2224
{
2225
    AVFormatContext *ic = is->ic;
2226
    AVCodecContext *avctx;
2227
    AVCodec *codec;
2228
    SDL_AudioSpec wanted_spec, spec;
2229

    
2230
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2231
        return -1;
2232
    avctx = ic->streams[stream_index]->codec;
2233

    
2234
    /* prepare audio output */
2235
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2236
        if (avctx->channels > 0) {
2237
            avctx->request_channels = FFMIN(2, avctx->channels);
2238
        } else {
2239
            avctx->request_channels = 2;
2240
        }
2241
    }
2242

    
2243
    codec = avcodec_find_decoder(avctx->codec_id);
2244
    avctx->debug_mv = debug_mv;
2245
    avctx->debug = debug;
2246
    avctx->workaround_bugs = workaround_bugs;
2247
    avctx->lowres = lowres;
2248
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2249
    avctx->idct_algo= idct;
2250
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2251
    avctx->skip_frame= skip_frame;
2252
    avctx->skip_idct= skip_idct;
2253
    avctx->skip_loop_filter= skip_loop_filter;
2254
    avctx->error_recognition= error_recognition;
2255
    avctx->error_concealment= error_concealment;
2256
    avcodec_thread_init(avctx, thread_count);
2257

    
2258
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2259

    
2260
    if (!codec ||
2261
        avcodec_open(avctx, codec) < 0)
2262
        return -1;
2263

    
2264
    /* prepare audio output */
2265
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2266
        wanted_spec.freq = avctx->sample_rate;
2267
        wanted_spec.format = AUDIO_S16SYS;
2268
        wanted_spec.channels = avctx->channels;
2269
        wanted_spec.silence = 0;
2270
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2271
        wanted_spec.callback = sdl_audio_callback;
2272
        wanted_spec.userdata = is;
2273
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2274
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2275
            return -1;
2276
        }
2277
        is->audio_hw_buf_size = spec.size;
2278
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2279
    }
2280

    
2281
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2282
    switch(avctx->codec_type) {
2283
    case AVMEDIA_TYPE_AUDIO:
2284
        is->audio_stream = stream_index;
2285
        is->audio_st = ic->streams[stream_index];
2286
        is->audio_buf_size = 0;
2287
        is->audio_buf_index = 0;
2288

    
2289
        /* init averaging filter */
2290
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2291
        is->audio_diff_avg_count = 0;
2292
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
2294
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2295

    
2296
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2297
        packet_queue_init(&is->audioq);
2298
        SDL_PauseAudio(0);
2299
        break;
2300
    case AVMEDIA_TYPE_VIDEO:
2301
        is->video_stream = stream_index;
2302
        is->video_st = ic->streams[stream_index];
2303

    
2304
//        is->video_current_pts_time = av_gettime();
2305

    
2306
        packet_queue_init(&is->videoq);
2307
        is->video_tid = SDL_CreateThread(video_thread, is);
2308
        break;
2309
    case AVMEDIA_TYPE_SUBTITLE:
2310
        is->subtitle_stream = stream_index;
2311
        is->subtitle_st = ic->streams[stream_index];
2312
        packet_queue_init(&is->subtitleq);
2313

    
2314
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2315
        break;
2316
    default:
2317
        break;
2318
    }
2319
    return 0;
2320
}
2321

    
2322
static void stream_component_close(VideoState *is, int stream_index)
2323
{
2324
    AVFormatContext *ic = is->ic;
2325
    AVCodecContext *avctx;
2326

    
2327
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2328
        return;
2329
    avctx = ic->streams[stream_index]->codec;
2330

    
2331
    switch(avctx->codec_type) {
2332
    case AVMEDIA_TYPE_AUDIO:
2333
        packet_queue_abort(&is->audioq);
2334

    
2335
        SDL_CloseAudio();
2336

    
2337
        packet_queue_end(&is->audioq);
2338
        if (is->reformat_ctx)
2339
            av_audio_convert_free(is->reformat_ctx);
2340
        is->reformat_ctx = NULL;
2341
        break;
2342
    case AVMEDIA_TYPE_VIDEO:
2343
        packet_queue_abort(&is->videoq);
2344

    
2345
        /* note: we also signal this mutex to make sure we deblock the
2346
           video thread in all cases */
2347
        SDL_LockMutex(is->pictq_mutex);
2348
        SDL_CondSignal(is->pictq_cond);
2349
        SDL_UnlockMutex(is->pictq_mutex);
2350

    
2351
        SDL_WaitThread(is->video_tid, NULL);
2352

    
2353
        packet_queue_end(&is->videoq);
2354
        break;
2355
    case AVMEDIA_TYPE_SUBTITLE:
2356
        packet_queue_abort(&is->subtitleq);
2357

    
2358
        /* note: we also signal this mutex to make sure we deblock the
2359
           video thread in all cases */
2360
        SDL_LockMutex(is->subpq_mutex);
2361
        is->subtitle_stream_changed = 1;
2362

    
2363
        SDL_CondSignal(is->subpq_cond);
2364
        SDL_UnlockMutex(is->subpq_mutex);
2365

    
2366
        SDL_WaitThread(is->subtitle_tid, NULL);
2367

    
2368
        packet_queue_end(&is->subtitleq);
2369
        break;
2370
    default:
2371
        break;
2372
    }
2373

    
2374
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2375
    avcodec_close(avctx);
2376
    switch(avctx->codec_type) {
2377
    case AVMEDIA_TYPE_AUDIO:
2378
        is->audio_st = NULL;
2379
        is->audio_stream = -1;
2380
        break;
2381
    case AVMEDIA_TYPE_VIDEO:
2382
        is->video_st = NULL;
2383
        is->video_stream = -1;
2384
        break;
2385
    case AVMEDIA_TYPE_SUBTITLE:
2386
        is->subtitle_st = NULL;
2387
        is->subtitle_stream = -1;
2388
        break;
2389
    default:
2390
        break;
2391
    }
2392
}
2393

    
2394
/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
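/* decode_thread() registers this callback via url_set_interrupt_cb(), so
 * blocking I/O inside av_open_input_file()/av_read_frame() can be
 * interrupted as soon as abort_request is set on the stream. */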
2402

    
2403
/* this thread gets the stream from the disk or the network */
2404
static int decode_thread(void *arg)
2405
{
2406
    VideoState *is = arg;
2407
    AVFormatContext *ic;
2408
    int err, i, ret;
2409
    int st_index[AVMEDIA_TYPE_NB];
2410
    AVPacket pkt1, *pkt = &pkt1;
2411
    AVFormatParameters params, *ap = &params;
2412
    int eof=0;
2413
    int pkt_in_play_range = 0;
2414

    
2415
    ic = avformat_alloc_context();
2416

    
2417
    memset(st_index, -1, sizeof(st_index));
2418
    is->video_stream = -1;
2419
    is->audio_stream = -1;
2420
    is->subtitle_stream = -1;
2421

    
2422
    global_video_state = is;
2423
    url_set_interrupt_cb(decode_interrupt_cb);
2424

    
2425
    memset(ap, 0, sizeof(*ap));
2426

    
2427
    ap->prealloced_context = 1;
2428
    ap->width = frame_width;
2429
    ap->height= frame_height;
2430
    ap->time_base= (AVRational){1, 25};
2431
    ap->pix_fmt = frame_pix_fmt;
2432

    
2433
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2434

    
2435
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2436
    if (err < 0) {
2437
        print_error(is->filename, err);
2438
        ret = -1;
2439
        goto fail;
2440
    }
2441
    is->ic = ic;
2442

    
2443
    if(genpts)
2444
        ic->flags |= AVFMT_FLAG_GENPTS;
2445

    
2446
    err = av_find_stream_info(ic);
2447
    if (err < 0) {
2448
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2449
        ret = -1;
2450
        goto fail;
2451
    }
2452
    if(ic->pb)
2453
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2454

    
2455
    if(seek_by_bytes<0)
2456
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2457

    
2458
    /* if seeking requested, we execute it */
2459
    if (start_time != AV_NOPTS_VALUE) {
2460
        int64_t timestamp;
2461

    
2462
        timestamp = start_time;
2463
        /* add the stream start time */
2464
        if (ic->start_time != AV_NOPTS_VALUE)
2465
            timestamp += ic->start_time;
2466
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2467
        if (ret < 0) {
2468
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2469
                    is->filename, (double)timestamp / AV_TIME_BASE);
2470
        }
2471
    }
2472

    
2473
    for (i = 0; i < ic->nb_streams; i++)
2474
        ic->streams[i]->discard = AVDISCARD_ALL;
2475
    if (!video_disable)
2476
        st_index[AVMEDIA_TYPE_VIDEO] =
2477
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2478
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2479
    if (!audio_disable)
2480
        st_index[AVMEDIA_TYPE_AUDIO] =
2481
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2482
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
2483
                                st_index[AVMEDIA_TYPE_VIDEO],
2484
                                NULL, 0);
2485
    if (!video_disable)
2486
        st_index[AVMEDIA_TYPE_SUBTITLE] =
2487
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2488
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2489
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2490
                                 st_index[AVMEDIA_TYPE_AUDIO] :
2491
                                 st_index[AVMEDIA_TYPE_VIDEO]),
2492
                                NULL, 0);
2493
    if (show_status) {
2494
        dump_format(ic, 0, is->filename, 0);
2495
    }
2496

    
2497
    /* open the streams */
2498
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2499
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2500
    }
2501

    
2502
    ret=-1;
2503
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2504
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2505
    }
2506
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2507
    if(ret<0) {
2508
        if (!display_disable)
2509
            is->show_audio = 2;
2510
    }
2511

    
2512
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2513
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2514
    }
2515

    
2516
    if (is->video_stream < 0 && is->audio_stream < 0) {
2517
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2518
        ret = -1;
2519
        goto fail;
2520
    }
2521

    
2522
    for(;;) {
2523
        if (is->abort_request)
2524
            break;
2525
        if (is->paused != is->last_paused) {
2526
            is->last_paused = is->paused;
2527
            if (is->paused)
2528
                is->read_pause_return= av_read_pause(ic);
2529
            else
2530
                av_read_play(ic);
2531
        }
2532
#if CONFIG_RTSP_DEMUXER
2533
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2534
            /* wait 10 ms to avoid trying to get another packet */
2535
            /* XXX: horrible */
2536
            SDL_Delay(10);
2537
            continue;
2538
        }
2539
#endif
2540
        if (is->seek_req) {
2541
            int64_t seek_target= is->seek_pos;
2542
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2543
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2544
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2545
//      of the seek_pos/seek_rel variables
2546

    
2547
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2548
            if (ret < 0) {
2549
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2550
            }else{
2551
                if (is->audio_stream >= 0) {
2552
                    packet_queue_flush(&is->audioq);
2553
                    packet_queue_put(&is->audioq, &flush_pkt);
2554
                }
2555
                if (is->subtitle_stream >= 0) {
2556
                    packet_queue_flush(&is->subtitleq);
2557
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2558
                }
2559
                if (is->video_stream >= 0) {
2560
                    packet_queue_flush(&is->videoq);
2561
                    packet_queue_put(&is->videoq, &flush_pkt);
2562
                }
2563
            }
2564
            is->seek_req = 0;
2565
            eof= 0;
2566
        }
2567

    
2568
        /* if the queues are full, no need to read more */
2569
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2570
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2571
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2572
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2573
            /* wait 10 ms */
2574
            SDL_Delay(10);
2575
            continue;
2576
        }
2577
        if(eof) {
2578
            if(is->video_stream >= 0){
2579
                av_init_packet(pkt);
2580
                pkt->data=NULL;
2581
                pkt->size=0;
2582
                pkt->stream_index= is->video_stream;
2583
                packet_queue_put(&is->videoq, pkt);
2584
            }
2585
            SDL_Delay(10);
2586
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2587
                if(loop!=1 && (!loop || --loop)){
2588
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2589
                }else if(autoexit){
2590
                    ret=AVERROR_EOF;
2591
                    goto fail;
2592
                }
2593
            }
2594
            continue;
2595
        }
2596
        ret = av_read_frame(ic, pkt);
2597
        if (ret < 0) {
2598
            if (ret == AVERROR_EOF || url_feof(ic->pb))
2599
                eof=1;
2600
            if (url_ferror(ic->pb))
2601
                break;
2602
            SDL_Delay(100); /* wait for user event */
2603
            continue;
2604
        }
2605
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2606
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2607
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2608
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2609
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2610
                <= ((double)duration/1000000);
2611
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2612
            packet_queue_put(&is->audioq, pkt);
2613
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2614
            packet_queue_put(&is->videoq, pkt);
2615
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2616
            packet_queue_put(&is->subtitleq, pkt);
2617
        } else {
2618
            av_free_packet(pkt);
2619
        }
2620
    }
2621
    /* wait until the end */
2622
    while (!is->abort_request) {
2623
        SDL_Delay(100);
2624
    }
2625

    
2626
    ret = 0;
2627
 fail:
2628
    /* disable interrupting */
2629
    global_video_state = NULL;
2630

    
2631
    /* close each stream */
2632
    if (is->audio_stream >= 0)
2633
        stream_component_close(is, is->audio_stream);
2634
    if (is->video_stream >= 0)
2635
        stream_component_close(is, is->video_stream);
2636
    if (is->subtitle_stream >= 0)
2637
        stream_component_close(is, is->subtitle_stream);
2638
    if (is->ic) {
2639
        av_close_input_file(is->ic);
2640
        is->ic = NULL; /* safety */
2641
    }
2642
    url_set_interrupt_cb(NULL);
2643

    
2644
    if (ret != 0) {
2645
        SDL_Event event;
2646

    
2647
        event.type = FF_QUIT_EVENT;
2648
        event.user.data1 = is;
2649
        SDL_PushEvent(&event);
2650
    }
2651
    return 0;
2652
}
2653

    
2654
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
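/* Thread layout: stream_open() only spawns the demuxing decode_thread();
 * that thread opens the individual streams, which in turn create
 * video_thread()/subtitle_thread() and start the SDL audio callback,
 * while the main thread keeps servicing SDL events in event_loop(). */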
2681

    
2682
static void stream_cycle_channel(VideoState *is, int codec_type)
2683
{
2684
    AVFormatContext *ic = is->ic;
2685
    int start_index, stream_index;
2686
    AVStream *st;
2687

    
2688
    if (codec_type == AVMEDIA_TYPE_VIDEO)
2689
        start_index = is->video_stream;
2690
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
2691
        start_index = is->audio_stream;
2692
    else
2693
        start_index = is->subtitle_stream;
2694
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2695
        return;
2696
    stream_index = start_index;
2697
    for(;;) {
2698
        if (++stream_index >= is->ic->nb_streams)
2699
        {
2700
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2701
            {
2702
                stream_index = -1;
2703
                goto the_end;
2704
            } else
2705
                stream_index = 0;
2706
        }
2707
        if (stream_index == start_index)
2708
            return;
2709
        st = ic->streams[stream_index];
2710
        if (st->codec->codec_type == codec_type) {
2711
            /* check that parameters are OK */
2712
            switch(codec_type) {
2713
            case AVMEDIA_TYPE_AUDIO:
2714
                if (st->codec->sample_rate != 0 &&
2715
                    st->codec->channels != 0)
2716
                    goto the_end;
2717
                break;
2718
            case AVMEDIA_TYPE_VIDEO:
2719
            case AVMEDIA_TYPE_SUBTITLE:
2720
                goto the_end;
2721
            default:
2722
                break;
2723
            }
2724
        }
2725
    }
2726
 the_end:
2727
    stream_component_close(is, start_index);
2728
    stream_component_open(is, stream_index);
2729
}
2730

    
2731

    
2732
static void toggle_full_screen(void)
2733
{
2734
    is_full_screen = !is_full_screen;
2735
    if (!fs_screen_width) {
2736
        /* use default SDL method */
2737
//        SDL_WM_ToggleFullScreen(screen);
2738
    }
2739
    video_open(cur_stream);
2740
}
2741

    
2742
static void toggle_pause(void)
2743
{
2744
    if (cur_stream)
2745
        stream_pause(cur_stream);
2746
    step = 0;
2747
}
2748

    
2749
static void step_to_next_frame(void)
2750
{
2751
    if (cur_stream) {
2752
        /* if the stream is paused, unpause it and then step */
2753
        if (cur_stream->paused)
2754
            stream_pause(cur_stream);
2755
    }
2756
    step = 1;
2757
}
2758

    
2759
static void toggle_audio_display(void)
2760
{
2761
    if (cur_stream) {
2762
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2763
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2764
        fill_rectangle(screen,
2765
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2766
                    bgcolor);
2767
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2768
    }
2769
}
2770

    
2771
/* handle an event sent by the GUI */
2772
static void event_loop(void)
2773
{
2774
    SDL_Event event;
2775
    double incr, pos, frac;
2776

    
2777
    for(;;) {
2778
        double x;
2779
        SDL_WaitEvent(&event);
2780
        switch(event.type) {
2781
        case SDL_KEYDOWN:
2782
            if (exit_on_keydown) {
2783
                do_exit();
2784
                break;
2785
            }
2786
            switch(event.key.keysym.sym) {
2787
            case SDLK_ESCAPE:
2788
            case SDLK_q:
2789
                do_exit();
2790
                break;
2791
            case SDLK_f:
2792
                toggle_full_screen();
2793
                break;
2794
            case SDLK_p:
2795
            case SDLK_SPACE:
2796
                toggle_pause();
2797
                break;
2798
            case SDLK_s: //S: Step to next frame
2799
                step_to_next_frame();
2800
                break;
2801
            case SDLK_a:
2802
                if (cur_stream)
2803
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2804
                break;
2805
            case SDLK_v:
2806
                if (cur_stream)
2807
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2808
                break;
2809
            case SDLK_t:
2810
                if (cur_stream)
2811
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2812
                break;
2813
            case SDLK_w:
2814
                toggle_audio_display();
2815
                break;
2816
            case SDLK_LEFT:
2817
                incr = -10.0;
2818
                goto do_seek;
2819
            case SDLK_RIGHT:
2820
                incr = 10.0;
2821
                goto do_seek;
2822
            case SDLK_UP:
2823
                incr = 60.0;
2824
                goto do_seek;
2825
            case SDLK_DOWN:
2826
                incr = -60.0;
2827
            do_seek:
2828
                if (cur_stream) {
2829
                    if (seek_by_bytes) {
2830
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2831
                            pos= cur_stream->video_current_pos;
2832
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2833
                            pos= cur_stream->audio_pkt.pos;
2834
                        }else
2835
                            pos = url_ftell(cur_stream->ic->pb);
2836
                        if (cur_stream->ic->bit_rate)
2837
                            incr *= cur_stream->ic->bit_rate / 8.0;
2838
                        else
2839
                            incr *= 180000.0;
2840
                        pos += incr;
2841
                        stream_seek(cur_stream, pos, incr, 1);
2842
                    } else {
2843
                        pos = get_master_clock(cur_stream);
2844
                        pos += incr;
2845
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2846
                    }
2847
                }
2848
                break;
2849
            default:
2850
                break;
2851
            }
2852
            break;
2853
        case SDL_MOUSEBUTTONDOWN:
2854
            if (exit_on_mousedown) {
2855
                do_exit();
2856
                break;
2857
            }
2858
        case SDL_MOUSEMOTION:
2859
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2860
                x= event.button.x;
2861
            }else{
2862
                if(event.motion.state != SDL_PRESSED)
2863
                    break;
2864
                x= event.motion.x;
2865
            }
2866
            if (cur_stream) {
2867
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2868
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2869
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2870
                }else{
2871
                    int64_t ts;
2872
                    int ns, hh, mm, ss;
2873
                    int tns, thh, tmm, tss;
2874
                    tns = cur_stream->ic->duration/1000000LL;
2875
                    thh = tns/3600;
2876
                    tmm = (tns%3600)/60;
2877
                    tss = (tns%60);
2878
                    frac = x/cur_stream->width;
2879
                    ns = frac*tns;
2880
                    hh = ns/3600;
2881
                    mm = (ns%3600)/60;
2882
                    ss = (ns%60);
2883
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2884
                            hh, mm, ss, thh, tmm, tss);
2885
                    ts = frac*cur_stream->ic->duration;
2886
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2887
                        ts += cur_stream->ic->start_time;
2888
                    stream_seek(cur_stream, ts, 0, 0);
2889
                }
2890
            }
2891
            break;
2892
        case SDL_VIDEORESIZE:
2893
            if (cur_stream) {
2894
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2895
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2896
                screen_width = cur_stream->width = event.resize.w;
2897
                screen_height= cur_stream->height= event.resize.h;
2898
            }
2899
            break;
2900
        case SDL_QUIT:
2901
        case FF_QUIT_EVENT:
2902
            do_exit();
2903
            break;
2904
        case FF_ALLOC_EVENT:
2905
            video_open(event.user.data1);
2906
            alloc_picture(event.user.data1);
2907
            break;
2908
        case FF_REFRESH_EVENT:
2909
            video_refresh_timer(event.user.data1);
2910
            cur_stream->refresh=0;
2911
            break;
2912
        default:
2913
            break;
2914
        }
2915
    }
2916
}
2917

    
2918
static void opt_frame_size(const char *arg)
2919
{
2920
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2921
        fprintf(stderr, "Incorrect frame size\n");
2922
        exit(1);
2923
    }
2924
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2925
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2926
        exit(1);
2927
    }
2928
}
2929

    
2930
static int opt_width(const char *opt, const char *arg)
2931
{
2932
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2933
    return 0;
2934
}
2935

    
2936
static int opt_height(const char *opt, const char *arg)
2937
{
2938
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2939
    return 0;
2940
}
2941

    
2942
static void opt_format(const char *arg)
2943
{
2944
    file_iformat = av_find_input_format(arg);
2945
    if (!file_iformat) {
2946
        fprintf(stderr, "Unknown input format: %s\n", arg);
2947
        exit(1);
2948
    }
2949
}
2950

    
2951
static void opt_frame_pix_fmt(const char *arg)
2952
{
2953
    frame_pix_fmt = av_get_pix_fmt(arg);
2954
}
2955

    
2956
static int opt_sync(const char *opt, const char *arg)
2957
{
2958
    if (!strcmp(arg, "audio"))
2959
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2960
    else if (!strcmp(arg, "video"))
2961
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2962
    else if (!strcmp(arg, "ext"))
2963
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2964
    else {
2965
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2966
        exit(1);
2967
    }
2968
    return 0;
2969
}
2970

    
2971
static int opt_seek(const char *opt, const char *arg)
2972
{
2973
    start_time = parse_time_or_die(opt, arg, 1);
2974
    return 0;
2975
}
2976

    
2977
static int opt_duration(const char *opt, const char *arg)
2978
{
2979
    duration = parse_time_or_die(opt, arg, 1);
2980
    return 0;
2981
}
2982

    
2983
static int opt_debug(const char *opt, const char *arg)
2984
{
2985
    av_log_set_level(99);
2986
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2987
    return 0;
2988
}
2989

    
2990
static int opt_vismv(const char *opt, const char *arg)
2991
{
2992
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2993
    return 0;
2994
}
2995

    
2996
static int opt_thread_count(const char *opt, const char *arg)
2997
{
2998
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2999
#if !HAVE_THREADS
3000
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3001
#endif
3002
    return 0;
3003
}
3004

    
3005
static const OptionDef options[] = {
3006
#include "cmdutils_common_opts.h"
3007
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3008
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3009
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3010
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3011
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3012
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3013
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3014
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3015
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3016
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3017
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3018
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3019
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3020
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3021
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3022
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3023
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3024
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3025
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3026
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3027
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3028
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3029
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3030
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3031
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3032
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3033
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3034
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3035
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3036
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3037
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3038
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3039
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3040
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3041
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3042
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3043
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3044
#if CONFIG_AVFILTER
3045
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3046
#endif
3047
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3048
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3049
    { NULL, },
3050
};
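/* Example invocations built from the option table above (illustrative only;
 * file names and stream/seek values are placeholders):
 *   ffplay input.mkv                          play a file with the default settings
 *   ffplay -ss 30 -t 10 input.mkv             start 30 seconds in and play 10 seconds
 *   ffplay -nodisp -autoexit input.mp3        audio only, quit when playback ends
 *   ffplay -ast 1 -sst 0 input.mkv            select a specific audio and subtitle stream
 *   ffplay -sync video -framedrop input.avi   sync to the video clock, dropping late frames
 */
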
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
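    /* a filename of "-" conventionally means standard input; map it to FFmpeg's pipe: protocol */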
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags;

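    /* collapse consecutive identical log messages instead of printing each one */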
    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();
    show_banner();
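    /* parse the command line; non-option arguments are handed to opt_input_file() */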
    parse_options(argc, argv, options, opt_input_file);
    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

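    /* drop SDL event types we never handle so they do not accumulate in the event queue */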
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
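    /* the flush packet is a sentinel queued after a seek, telling the decoder threads to flush their buffers */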
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";
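    /* open the input file; stream_open() also kicks off the background demuxing/decoding work */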
    cur_stream = stream_open(input_filename, file_iformat);
    event_loop();

    /* never returns */

    return 0;
}