ffmpeg / ffplay.c @ a6f395d6

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavutil/imgutils.h"
32
#include "libavutil/parseutils.h"
33
#include "libavutil/samplefmt.h"
34
#include "libavutil/avassert.h"
35
#include "libavformat/avformat.h"
36
#include "libavdevice/avdevice.h"
37
#include "libswscale/swscale.h"
38
#include "libavcodec/audioconvert.h"
39
#include "libavcodec/opt.h"
40
#include "libavcodec/avfft.h"
41

    
42
#if CONFIG_AVFILTER
43
# include "libavfilter/avfilter.h"
44
# include "libavfilter/avfiltergraph.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG
63
//#define DEBUG_SYNC
64

    
65
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67
#define MIN_FRAMES 5
68

    
69
/* SDL audio buffer size, in samples. Should be small to have precise
70
   A/V sync as SDL does not have hardware buffer fullness info. */
71
#define SDL_AUDIO_BUFFER_SIZE 1024
72

    
73
/* no AV sync correction is done if below the AV sync threshold */
74
#define AV_SYNC_THRESHOLD 0.01
75
/* no AV correction is done if the error is too big */
76
#define AV_NOSYNC_THRESHOLD 10.0
77

    
78
#define FRAME_SKIP_FACTOR 0.05
79

    
80
/* maximum audio speed change to get correct sync */
81
#define SAMPLE_CORRECTION_PERCENT_MAX 10
82

    
83
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84
#define AUDIO_DIFF_AVG_NB   20
85

    
86
/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
87
#define SAMPLE_ARRAY_SIZE (2*65536)
88

    
89
static int sws_flags = SWS_BICUBIC;
90

    
91
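/* Thread-safe FIFO of demuxed packets: packet_queue_put() appends,
   packet_queue_get() pops (optionally blocking); access is serialized
   with an SDL mutex/condition pair and 'size' tracks the queued bytes
   so the read thread can limit buffering. */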
typedef struct PacketQueue {
92
    AVPacketList *first_pkt, *last_pkt;
93
    int nb_packets;
94
    int size;
95
    int abort_request;
96
    SDL_mutex *mutex;
97
    SDL_cond *cond;
98
} PacketQueue;
99

    
100
#define VIDEO_PICTURE_QUEUE_SIZE 2
101
#define SUBPICTURE_QUEUE_SIZE 4
102

    
103
typedef struct VideoPicture {
104
    double pts;                                  ///<presentation time stamp for this picture
105
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
106
    int64_t pos;                                 ///<byte position in file
107
    SDL_Overlay *bmp;
108
    int width, height; /* source height & width */
109
    int allocated;
110
    enum PixelFormat pix_fmt;
111

    
112
#if CONFIG_AVFILTER
113
    AVFilterBufferRef *picref;
114
#endif
115
} VideoPicture;
116

    
117
typedef struct SubPicture {
118
    double pts; /* presentation time stamp for this picture */
119
    AVSubtitle sub;
120
} SubPicture;
121

    
122
enum {
123
    AV_SYNC_AUDIO_MASTER, /* default choice */
124
    AV_SYNC_VIDEO_MASTER,
125
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
126
};
127

    
128
typedef struct VideoState {
129
    SDL_Thread *parse_tid;
130
    SDL_Thread *video_tid;
131
    SDL_Thread *refresh_tid;
132
    AVInputFormat *iformat;
133
    int no_background;
134
    int abort_request;
135
    int paused;
136
    int last_paused;
137
    int seek_req;
138
    int seek_flags;
139
    int64_t seek_pos;
140
    int64_t seek_rel;
141
    int read_pause_return;
142
    AVFormatContext *ic;
143

    
144
    int audio_stream;
145

    
146
    int av_sync_type;
147
    double external_clock; /* external clock base */
148
    int64_t external_clock_time;
149

    
150
    double audio_clock;
151
    double audio_diff_cum; /* used for AV difference average computation */
152
    double audio_diff_avg_coef;
153
    double audio_diff_threshold;
154
    int audio_diff_avg_count;
155
    AVStream *audio_st;
156
    PacketQueue audioq;
157
    int audio_hw_buf_size;
158
    /* samples output by the codec. we reserve more space for avsync
159
       compensation */
160
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162
    uint8_t *audio_buf;
163
    unsigned int audio_buf_size; /* in bytes */
164
    int audio_buf_index; /* in bytes */
165
    AVPacket audio_pkt_temp;
166
    AVPacket audio_pkt;
167
    enum AVSampleFormat audio_src_fmt;
168
    AVAudioConvert *reformat_ctx;
169

    
170
    int show_audio; /* if true, display audio samples */
171
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
172
    int sample_array_index;
173
    int last_i_start;
174
    RDFTContext *rdft;
175
    int rdft_bits;
176
    FFTSample *rdft_data;
177
    int xpos;
178

    
179
    SDL_Thread *subtitle_tid;
180
    int subtitle_stream;
181
    int subtitle_stream_changed;
182
    AVStream *subtitle_st;
183
    PacketQueue subtitleq;
184
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
185
    int subpq_size, subpq_rindex, subpq_windex;
186
    SDL_mutex *subpq_mutex;
187
    SDL_cond *subpq_cond;
188

    
189
    double frame_timer;
190
    double frame_last_pts;
191
    double frame_last_delay;
192
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
193
    int video_stream;
194
    AVStream *video_st;
195
    PacketQueue videoq;
196
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
197
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
198
    int64_t video_current_pos;                   ///<current displayed file pos
199
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
200
    int pictq_size, pictq_rindex, pictq_windex;
201
    SDL_mutex *pictq_mutex;
202
    SDL_cond *pictq_cond;
203
#if !CONFIG_AVFILTER
204
    struct SwsContext *img_convert_ctx;
205
#endif
206

    
207
    //    QETimer *video_timer;
208
    char filename[1024];
209
    int width, height, xleft, ytop;
210

    
211
#if CONFIG_AVFILTER
212
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213
#endif
214

    
215
    float skip_frames;
216
    float skip_frames_index;
217
    int refresh;
218
} VideoState;
219

    
220
static void show_help(void);
221
static int audio_write_get_buf_size(VideoState *is);
222

    
223
/* options specified by the user */
224
static AVInputFormat *file_iformat;
225
static const char *input_filename;
226
static const char *window_title;
227
static int fs_screen_width;
228
static int fs_screen_height;
229
static int screen_width = 0;
230
static int screen_height = 0;
231
static int frame_width = 0;
232
static int frame_height = 0;
233
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234
static int audio_disable;
235
static int video_disable;
236
static int wanted_stream[AVMEDIA_TYPE_NB]={
237
    [AVMEDIA_TYPE_AUDIO]=-1,
238
    [AVMEDIA_TYPE_VIDEO]=-1,
239
    [AVMEDIA_TYPE_SUBTITLE]=-1,
240
};
241
static int seek_by_bytes=-1;
242
static int display_disable;
243
static int show_status = 1;
244
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245
static int64_t start_time = AV_NOPTS_VALUE;
246
static int64_t duration = AV_NOPTS_VALUE;
247
static int debug = 0;
248
static int debug_mv = 0;
249
static int step = 0;
250
static int thread_count = 1;
251
static int workaround_bugs = 1;
252
static int fast = 0;
253
static int genpts = 0;
254
static int lowres = 0;
255
static int idct = FF_IDCT_AUTO;
256
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259
static int error_recognition = FF_ER_CAREFUL;
260
static int error_concealment = 3;
261
static int decoder_reorder_pts= -1;
262
static int autoexit;
263
static int exit_on_keydown;
264
static int exit_on_mousedown;
265
static int loop=1;
266
static int framedrop=1;
267

    
268
static int rdftspeed=20;
269
#if CONFIG_AVFILTER
270
static char *vfilters = NULL;
271
#endif
272

    
273
/* current context */
274
static int is_full_screen;
275
static VideoState *cur_stream;
276
static int64_t audio_callback_time;
277

    
278
static AVPacket flush_pkt;
279

    
280
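/* custom SDL user events: overlay (re)allocation in the main thread,
   display refresh requests from refresh_thread(), and quit notification */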
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
281
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283

    
284
static SDL_Surface *screen;
285

    
286
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
287
{
288
    AVPacketList *pkt1;
289

    
290
    /* duplicate the packet */
291
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
292
        return -1;
293

    
294
    pkt1 = av_malloc(sizeof(AVPacketList));
295
    if (!pkt1)
296
        return -1;
297
    pkt1->pkt = *pkt;
298
    pkt1->next = NULL;
299

    
300

    
301
    SDL_LockMutex(q->mutex);
302

    
303
    if (!q->last_pkt)
304

    
305
        q->first_pkt = pkt1;
306
    else
307
        q->last_pkt->next = pkt1;
308
    q->last_pkt = pkt1;
309
    q->nb_packets++;
310
    q->size += pkt1->pkt.size + sizeof(*pkt1);
311
    /* XXX: should duplicate packet data in DV case */
312
    SDL_CondSignal(q->cond);
313

    
314
    SDL_UnlockMutex(q->mutex);
315
    return 0;
316
}
317

    
318
/* packet queue handling */
319
static void packet_queue_init(PacketQueue *q)
320
{
321
    memset(q, 0, sizeof(PacketQueue));
322
    q->mutex = SDL_CreateMutex();
323
    q->cond = SDL_CreateCond();
324
    packet_queue_put(q, &flush_pkt);
325
}
326

    
327
static void packet_queue_flush(PacketQueue *q)
328
{
329
    AVPacketList *pkt, *pkt1;
330

    
331
    SDL_LockMutex(q->mutex);
332
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
333
        pkt1 = pkt->next;
334
        av_free_packet(&pkt->pkt);
335
        av_freep(&pkt);
336
    }
337
    q->last_pkt = NULL;
338
    q->first_pkt = NULL;
339
    q->nb_packets = 0;
340
    q->size = 0;
341
    SDL_UnlockMutex(q->mutex);
342
}
343

    
344
static void packet_queue_end(PacketQueue *q)
345
{
346
    packet_queue_flush(q);
347
    SDL_DestroyMutex(q->mutex);
348
    SDL_DestroyCond(q->cond);
349
}
350

    
351
static void packet_queue_abort(PacketQueue *q)
352
{
353
    SDL_LockMutex(q->mutex);
354

    
355
    q->abort_request = 1;
356

    
357
    SDL_CondSignal(q->cond);
358

    
359
    SDL_UnlockMutex(q->mutex);
360
}
361

    
362
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364
{
365
    AVPacketList *pkt1;
366
    int ret;
367

    
368
    SDL_LockMutex(q->mutex);
369

    
370
    for(;;) {
371
        if (q->abort_request) {
372
            ret = -1;
373
            break;
374
        }
375

    
376
        pkt1 = q->first_pkt;
377
        if (pkt1) {
378
            q->first_pkt = pkt1->next;
379
            if (!q->first_pkt)
380
                q->last_pkt = NULL;
381
            q->nb_packets--;
382
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
383
            *pkt = pkt1->pkt;
384
            av_free(pkt1);
385
            ret = 1;
386
            break;
387
        } else if (!block) {
388
            ret = 0;
389
            break;
390
        } else {
391
            SDL_CondWait(q->cond, q->mutex);
392
        }
393
    }
394
    SDL_UnlockMutex(q->mutex);
395
    return ret;
396
}
397

    
398
static inline void fill_rectangle(SDL_Surface *screen,
399
                                  int x, int y, int w, int h, int color)
400
{
401
    SDL_Rect rect;
402
    rect.x = x;
403
    rect.y = y;
404
    rect.w = w;
405
    rect.h = h;
406
    SDL_FillRect(screen, &rect, color);
407
}
408

    
409
#if 0
410
/* draw only the border of a rectangle */
411
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
412
{
413
    int w1, w2, h1, h2;
414

415
    /* fill the background */
416
    w1 = x;
417
    if (w1 < 0)
418
        w1 = 0;
419
    w2 = s->width - (x + w);
420
    if (w2 < 0)
421
        w2 = 0;
422
    h1 = y;
423
    if (h1 < 0)
424
        h1 = 0;
425
    h2 = s->height - (y + h);
426
    if (h2 < 0)
427
        h2 = 0;
428
    fill_rectangle(screen,
429
                   s->xleft, s->ytop,
430
                   w1, s->height,
431
                   color);
432
    fill_rectangle(screen,
433
                   s->xleft + s->width - w2, s->ytop,
434
                   w2, s->height,
435
                   color);
436
    fill_rectangle(screen,
437
                   s->xleft + w1, s->ytop,
438
                   s->width - w1 - w2, h1,
439
                   color);
440
    fill_rectangle(screen,
441
                   s->xleft + w1, s->ytop + s->height - h2,
442
                   s->width - w1 - w2, h2,
443
                   color);
444
}
445
#endif
446

    
447
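/* per-pixel helpers for subtitle blending: ALPHA_BLEND mixes an old and a
   new component using an 8-bit alpha, RGBA_IN/YUVA_IN unpack a packed
   32-bit pixel (YUVA_IN via a palette lookup) and YUVA_OUT packs it again */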
#define ALPHA_BLEND(a, oldp, newp, s)\
448
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
449

    
450
#define RGBA_IN(r, g, b, a, s)\
451
{\
452
    unsigned int v = ((const uint32_t *)(s))[0];\
453
    a = (v >> 24) & 0xff;\
454
    r = (v >> 16) & 0xff;\
455
    g = (v >> 8) & 0xff;\
456
    b = v & 0xff;\
457
}
458

    
459
#define YUVA_IN(y, u, v, a, s, pal)\
460
{\
461
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
462
    a = (val >> 24) & 0xff;\
463
    y = (val >> 16) & 0xff;\
464
    u = (val >> 8) & 0xff;\
465
    v = val & 0xff;\
466
}
467

    
468
#define YUVA_OUT(d, y, u, v, a)\
469
{\
470
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
471
}
472

    
473

    
474
#define BPP 1
475

    
476
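/* alpha-blend a palettized subtitle rectangle onto a YUV420P picture:
   luma is blended per pixel, chroma at half resolution, with the odd
   leading/trailing rows and columns handled separately */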
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
477
{
478
    int wrap, wrap3, width2, skip2;
479
    int y, u, v, a, u1, v1, a1, w, h;
480
    uint8_t *lum, *cb, *cr;
481
    const uint8_t *p;
482
    const uint32_t *pal;
483
    int dstx, dsty, dstw, dsth;
484

    
485
    dstw = av_clip(rect->w, 0, imgw);
486
    dsth = av_clip(rect->h, 0, imgh);
487
    dstx = av_clip(rect->x, 0, imgw - dstw);
488
    dsty = av_clip(rect->y, 0, imgh - dsth);
489
    lum = dst->data[0] + dsty * dst->linesize[0];
490
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
491
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
492

    
493
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
494
    skip2 = dstx >> 1;
495
    wrap = dst->linesize[0];
496
    wrap3 = rect->pict.linesize[0];
497
    p = rect->pict.data[0];
498
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
499

    
500
    if (dsty & 1) {
501
        lum += dstx;
502
        cb += skip2;
503
        cr += skip2;
504

    
505
        if (dstx & 1) {
506
            YUVA_IN(y, u, v, a, p, pal);
507
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
508
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
509
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
510
            cb++;
511
            cr++;
512
            lum++;
513
            p += BPP;
514
        }
515
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
516
            YUVA_IN(y, u, v, a, p, pal);
517
            u1 = u;
518
            v1 = v;
519
            a1 = a;
520
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521

    
522
            YUVA_IN(y, u, v, a, p + BPP, pal);
523
            u1 += u;
524
            v1 += v;
525
            a1 += a;
526
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
527
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
528
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
529
            cb++;
530
            cr++;
531
            p += 2 * BPP;
532
            lum += 2;
533
        }
534
        if (w) {
535
            YUVA_IN(y, u, v, a, p, pal);
536
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
537
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
538
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
539
            p++;
540
            lum++;
541
        }
542
        p += wrap3 - dstw * BPP;
543
        lum += wrap - dstw - dstx;
544
        cb += dst->linesize[1] - width2 - skip2;
545
        cr += dst->linesize[2] - width2 - skip2;
546
    }
547
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
548
        lum += dstx;
549
        cb += skip2;
550
        cr += skip2;
551

    
552
        if (dstx & 1) {
553
            YUVA_IN(y, u, v, a, p, pal);
554
            u1 = u;
555
            v1 = v;
556
            a1 = a;
557
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
558
            p += wrap3;
559
            lum += wrap;
560
            YUVA_IN(y, u, v, a, p, pal);
561
            u1 += u;
562
            v1 += v;
563
            a1 += a;
564
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
566
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
567
            cb++;
568
            cr++;
569
            p += -wrap3 + BPP;
570
            lum += -wrap + 1;
571
        }
572
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
573
            YUVA_IN(y, u, v, a, p, pal);
574
            u1 = u;
575
            v1 = v;
576
            a1 = a;
577
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
578

    
579
            YUVA_IN(y, u, v, a, p + BPP, pal);
580
            u1 += u;
581
            v1 += v;
582
            a1 += a;
583
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
584
            p += wrap3;
585
            lum += wrap;
586

    
587
            YUVA_IN(y, u, v, a, p, pal);
588
            u1 += u;
589
            v1 += v;
590
            a1 += a;
591
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592

    
593
            YUVA_IN(y, u, v, a, p + BPP, pal);
594
            u1 += u;
595
            v1 += v;
596
            a1 += a;
597
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
598

    
599
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
600
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
601

    
602
            cb++;
603
            cr++;
604
            p += -wrap3 + 2 * BPP;
605
            lum += -wrap + 2;
606
        }
607
        if (w) {
608
            YUVA_IN(y, u, v, a, p, pal);
609
            u1 = u;
610
            v1 = v;
611
            a1 = a;
612
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
613
            p += wrap3;
614
            lum += wrap;
615
            YUVA_IN(y, u, v, a, p, pal);
616
            u1 += u;
617
            v1 += v;
618
            a1 += a;
619
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
620
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
621
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
622
            cb++;
623
            cr++;
624
            p += -wrap3 + BPP;
625
            lum += -wrap + 1;
626
        }
627
        p += wrap3 + (wrap3 - dstw * BPP);
628
        lum += wrap + (wrap - dstw - dstx);
629
        cb += dst->linesize[1] - width2 - skip2;
630
        cr += dst->linesize[2] - width2 - skip2;
631
    }
632
    /* handle odd height */
633
    if (h) {
634
        lum += dstx;
635
        cb += skip2;
636
        cr += skip2;
637

    
638
        if (dstx & 1) {
639
            YUVA_IN(y, u, v, a, p, pal);
640
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
641
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
642
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
643
            cb++;
644
            cr++;
645
            lum++;
646
            p += BPP;
647
        }
648
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
649
            YUVA_IN(y, u, v, a, p, pal);
650
            u1 = u;
651
            v1 = v;
652
            a1 = a;
653
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
654

    
655
            YUVA_IN(y, u, v, a, p + BPP, pal);
656
            u1 += u;
657
            v1 += v;
658
            a1 += a;
659
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
660
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
661
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
662
            cb++;
663
            cr++;
664
            p += 2 * BPP;
665
            lum += 2;
666
        }
667
        if (w) {
668
            YUVA_IN(y, u, v, a, p, pal);
669
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
670
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
671
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
672
        }
673
    }
674
}
675

    
676
static void free_subpicture(SubPicture *sp)
677
{
678
    avsubtitle_free(&sp->sub);
679
}
680

    
681
static void video_image_display(VideoState *is)
682
{
683
    VideoPicture *vp;
684
    SubPicture *sp;
685
    AVPicture pict;
686
    float aspect_ratio;
687
    int width, height, x, y;
688
    SDL_Rect rect;
689
    int i;
690

    
691
    vp = &is->pictq[is->pictq_rindex];
692
    if (vp->bmp) {
693
#if CONFIG_AVFILTER
694
         if (vp->picref->video->pixel_aspect.num == 0)
695
             aspect_ratio = 0;
696
         else
697
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
698
#else
699

    
700
        /* XXX: use variable in the frame */
701
        if (is->video_st->sample_aspect_ratio.num)
702
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
703
        else if (is->video_st->codec->sample_aspect_ratio.num)
704
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
705
        else
706
            aspect_ratio = 0;
707
#endif
708
        if (aspect_ratio <= 0.0)
709
            aspect_ratio = 1.0;
710
        aspect_ratio *= (float)vp->width / (float)vp->height;
711

    
712
        if (is->subtitle_st) {
713
            if (is->subpq_size > 0) {
714
                sp = &is->subpq[is->subpq_rindex];
715

    
716
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
717
                    SDL_LockYUVOverlay (vp->bmp);
718

    
719
                    pict.data[0] = vp->bmp->pixels[0];
720
                    pict.data[1] = vp->bmp->pixels[2];
721
                    pict.data[2] = vp->bmp->pixels[1];
722

    
723
                    pict.linesize[0] = vp->bmp->pitches[0];
724
                    pict.linesize[1] = vp->bmp->pitches[2];
725
                    pict.linesize[2] = vp->bmp->pitches[1];
726

    
727
                    for (i = 0; i < sp->sub.num_rects; i++)
728
                        blend_subrect(&pict, sp->sub.rects[i],
729
                                      vp->bmp->w, vp->bmp->h);
730

    
731
                    SDL_UnlockYUVOverlay (vp->bmp);
732
                }
733
            }
734
        }
735

    
736

    
737
        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
738
        height = is->height;
739
        width = ((int)rint(height * aspect_ratio)) & ~1;
740
        if (width > is->width) {
741
            width = is->width;
742
            height = ((int)rint(width / aspect_ratio)) & ~1;
743
        }
744
        x = (is->width - width) / 2;
745
        y = (is->height - height) / 2;
746
        if (!is->no_background) {
747
            /* fill the background */
748
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
749
        } else {
750
            is->no_background = 0;
751
        }
752
        rect.x = is->xleft + x;
753
        rect.y = is->ytop  + y;
754
        rect.w = width;
755
        rect.h = height;
756
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
757
    } else {
758
#if 0
759
        fill_rectangle(screen,
760
                       is->xleft, is->ytop, is->width, is->height,
761
                       QERGB(0x00, 0x00, 0x00));
762
#endif
763
    }
764
}
765

    
766
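/* modulo that always returns a value in [0, b), even for negative a */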
static inline int compute_mod(int a, int b)
767
{
768
    return a < 0 ? a%b + b : a%b;
769
}
770

    
771
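/* draw the audio visualization: in mode 1 one waveform per channel is
   drawn, otherwise an RDFT-based spectrum column is plotted at the
   current x position; the display window is centered on the samples
   currently being played, estimated from the SDL buffer fill level */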
static void video_audio_display(VideoState *s)
772
{
773
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
774
    int ch, channels, h, h2, bgcolor, fgcolor;
775
    int64_t time_diff;
776
    int rdft_bits, nb_freq;
777

    
778
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
779
        ;
780
    nb_freq= 1<<(rdft_bits-1);
781

    
782
    /* compute display index : center on currently output samples */
783
    channels = s->audio_st->codec->channels;
784
    nb_display_channels = channels;
785
    if (!s->paused) {
786
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
787
        n = 2 * channels;
788
        delay = audio_write_get_buf_size(s);
789
        delay /= n;
790

    
791
        /* to be more precise, we take into account the time spent since
792
           the last buffer computation */
793
        if (audio_callback_time) {
794
            time_diff = av_gettime() - audio_callback_time;
795
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
796
        }
797

    
798
        delay += 2*data_used;
799
        if (delay < data_used)
800
            delay = data_used;
801

    
802
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
803
        if(s->show_audio==1){
804
            h= INT_MIN;
805
            for(i=0; i<1000; i+=channels){
806
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
807
                int a= s->sample_array[idx];
808
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
809
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
810
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
811
                int score= a-d;
812
                if(h<score && (b^c)<0){
813
                    h= score;
814
                    i_start= idx;
815
                }
816
            }
817
        }
818

    
819
        s->last_i_start = i_start;
820
    } else {
821
        i_start = s->last_i_start;
822
    }
823

    
824
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
825
    if(s->show_audio==1){
826
        fill_rectangle(screen,
827
                       s->xleft, s->ytop, s->width, s->height,
828
                       bgcolor);
829

    
830
        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
831

    
832
        /* total height for one channel */
833
        h = s->height / nb_display_channels;
834
        /* graph height / 2 */
835
        h2 = (h * 9) / 20;
836
        for(ch = 0;ch < nb_display_channels; ch++) {
837
            i = i_start + ch;
838
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
839
            for(x = 0; x < s->width; x++) {
840
                y = (s->sample_array[i] * h2) >> 15;
841
                if (y < 0) {
842
                    y = -y;
843
                    ys = y1 - y;
844
                } else {
845
                    ys = y1;
846
                }
847
                fill_rectangle(screen,
848
                               s->xleft + x, ys, 1, y,
849
                               fgcolor);
850
                i += channels;
851
                if (i >= SAMPLE_ARRAY_SIZE)
852
                    i -= SAMPLE_ARRAY_SIZE;
853
            }
854
        }
855

    
856
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
857

    
858
        for(ch = 1;ch < nb_display_channels; ch++) {
859
            y = s->ytop + ch * h;
860
            fill_rectangle(screen,
861
                           s->xleft, y, s->width, 1,
862
                           fgcolor);
863
        }
864
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
865
    }else{
866
        nb_display_channels= FFMIN(nb_display_channels, 2);
867
        if(rdft_bits != s->rdft_bits){
868
            av_rdft_end(s->rdft);
869
            av_free(s->rdft_data);
870
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
871
            s->rdft_bits= rdft_bits;
872
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
873
        }
874
        {
875
            FFTSample *data[2];
876
            for(ch = 0;ch < nb_display_channels; ch++) {
877
                data[ch] = s->rdft_data + 2*nb_freq*ch;
878
                i = i_start + ch;
879
                for(x = 0; x < 2*nb_freq; x++) {
880
                    double w= (x-nb_freq)*(1.0/nb_freq);
881
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
882
                    i += channels;
883
                    if (i >= SAMPLE_ARRAY_SIZE)
884
                        i -= SAMPLE_ARRAY_SIZE;
885
                }
886
                av_rdft_calc(s->rdft, data[ch]);
887
            }
888
            // Least efficient way to do this; we should of course access the data directly, but it's more than fast enough.
889
            for(y=0; y<s->height; y++){
890
                double w= 1/sqrt(nb_freq);
891
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
892
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
893
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
894
                a= FFMIN(a,255);
895
                b= FFMIN(b,255);
896
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
897

    
898
                fill_rectangle(screen,
899
                            s->xpos, s->height-y, 1, 1,
900
                            fgcolor);
901
            }
902
        }
903
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
904
        s->xpos++;
905
        if(s->xpos >= s->width)
906
            s->xpos= s->xleft;
907
    }
908
}
909

    
910
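/* (re)open the SDL video surface; the size is taken from the fullscreen
   resolution, a user-forced width/height, the filter chain output or the
   codec dimensions, falling back to 640x480 */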
static int video_open(VideoState *is){
911
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
912
    int w,h;
913

    
914
    if(is_full_screen) flags |= SDL_FULLSCREEN;
915
    else               flags |= SDL_RESIZABLE;
916

    
917
    if (is_full_screen && fs_screen_width) {
918
        w = fs_screen_width;
919
        h = fs_screen_height;
920
    } else if(!is_full_screen && screen_width){
921
        w = screen_width;
922
        h = screen_height;
923
#if CONFIG_AVFILTER
924
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
925
        w = is->out_video_filter->inputs[0]->w;
926
        h = is->out_video_filter->inputs[0]->h;
927
#else
928
    }else if (is->video_st && is->video_st->codec->width){
929
        w = is->video_st->codec->width;
930
        h = is->video_st->codec->height;
931
#endif
932
    } else {
933
        w = 640;
934
        h = 480;
935
    }
936
    if(screen && is->width == screen->w && screen->w == w
937
       && is->height== screen->h && screen->h == h)
938
        return 0;
939

    
940
#ifndef __APPLE__
941
    screen = SDL_SetVideoMode(w, h, 0, flags);
942
#else
943
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
944
    screen = SDL_SetVideoMode(w, h, 24, flags);
945
#endif
946
    if (!screen) {
947
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
948
        return -1;
949
    }
950
    if (!window_title)
951
        window_title = input_filename;
952
    SDL_WM_SetCaption(window_title, window_title);
953

    
954
    is->width = screen->w;
955
    is->height = screen->h;
956

    
957
    return 0;
958
}
959

    
960
/* display the current picture, if any */
961
static void video_display(VideoState *is)
962
{
963
    if(!screen)
964
        video_open(cur_stream);
965
    if (is->audio_st && is->show_audio)
966
        video_audio_display(is);
967
    else if (is->video_st)
968
        video_image_display(is);
969
}
970

    
971
static int refresh_thread(void *opaque)
972
{
973
    VideoState *is= opaque;
974
    while(!is->abort_request){
975
        SDL_Event event;
976
        event.type = FF_REFRESH_EVENT;
977
        event.user.data1 = opaque;
978
        if(!is->refresh){
979
            is->refresh=1;
980
            SDL_PushEvent(&event);
981
        }
982
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time, but SDL's event passing is so slow it would be silly
983
    }
984
    return 0;
985
}
986

    
987
/* get the current audio clock value */
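/* i.e. the pts of the last decoded audio data minus the part that is
   still waiting in the hardware/SDL buffer (assuming 16-bit samples) */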
988
static double get_audio_clock(VideoState *is)
989
{
990
    double pts;
991
    int hw_buf_size, bytes_per_sec;
992
    pts = is->audio_clock;
993
    hw_buf_size = audio_write_get_buf_size(is);
994
    bytes_per_sec = 0;
995
    if (is->audio_st) {
996
        bytes_per_sec = is->audio_st->codec->sample_rate *
997
            2 * is->audio_st->codec->channels;
998
    }
999
    if (bytes_per_sec)
1000
        pts -= (double)hw_buf_size / bytes_per_sec;
1001
    return pts;
1002
}
1003

    
1004
/* get the current video clock value */
1005
static double get_video_clock(VideoState *is)
1006
{
1007
    if (is->paused) {
1008
        return is->video_current_pts;
1009
    } else {
1010
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1011
    }
1012
}
1013

    
1014
/* get the current external clock value */
1015
static double get_external_clock(VideoState *is)
1016
{
1017
    int64_t ti;
1018
    ti = av_gettime();
1019
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1020
}
1021

    
1022
/* get the current master clock value */
1023
static double get_master_clock(VideoState *is)
1024
{
1025
    double val;
1026

    
1027
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1028
        if (is->video_st)
1029
            val = get_video_clock(is);
1030
        else
1031
            val = get_audio_clock(is);
1032
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1033
        if (is->audio_st)
1034
            val = get_audio_clock(is);
1035
        else
1036
            val = get_video_clock(is);
1037
    } else {
1038
        val = get_external_clock(is);
1039
    }
1040
    return val;
1041
}
1042

    
1043
/* seek in the stream */
1044
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1045
{
1046
    if (!is->seek_req) {
1047
        is->seek_pos = pos;
1048
        is->seek_rel = rel;
1049
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1050
        if (seek_by_bytes)
1051
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1052
        is->seek_req = 1;
1053
    }
1054
}
1055

    
1056
/* pause or resume the video */
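/* when resuming, frame_timer is advanced by the time spent paused so
   that the video does not try to catch up with the wall clock */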
1057
static void stream_pause(VideoState *is)
1058
{
1059
    if (is->paused) {
1060
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1061
        if(is->read_pause_return != AVERROR(ENOSYS)){
1062
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1063
        }
1064
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1065
    }
1066
    is->paused = !is->paused;
1067
}
1068

    
1069
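/* compute the time at which the given frame should be displayed: the
   nominal delay is the pts difference to the previous frame and, when
   video is not the master clock, the delay is dropped or doubled to
   drift back towards the master clock */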
static double compute_target_time(double frame_current_pts, VideoState *is)
1070
{
1071
    double delay, sync_threshold, diff = 0;
1072

    
1073
    /* compute nominal delay */
1074
    delay = frame_current_pts - is->frame_last_pts;
1075
    if (delay <= 0 || delay >= 10.0) {
1076
        /* if incorrect delay, use previous one */
1077
        delay = is->frame_last_delay;
1078
    } else {
1079
        is->frame_last_delay = delay;
1080
    }
1081
    is->frame_last_pts = frame_current_pts;
1082

    
1083
    /* update delay to follow master synchronisation source */
1084
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1085
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1086
        /* if video is slave, we try to correct big delays by
1087
           duplicating or deleting a frame */
1088
        diff = get_video_clock(is) - get_master_clock(is);
1089

    
1090
        /* skip or repeat frame. We take into account the
1091
           delay to compute the threshold. I still don't know
1092
           if it is the best guess */
1093
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1094
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1095
            if (diff <= -sync_threshold)
1096
                delay = 0;
1097
            else if (diff >= sync_threshold)
1098
                delay = 2 * delay;
1099
        }
1100
    }
1101
    is->frame_timer += delay;
1102
#if defined(DEBUG_SYNC)
1103
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1104
            delay, frame_current_pts, -diff);
1105
#endif
1106

    
1107
    return is->frame_timer;
1108
}
1109

    
1110
/* called to display each frame */
1111
static void video_refresh_timer(void *opaque)
1112
{
1113
    VideoState *is = opaque;
1114
    VideoPicture *vp;
1115

    
1116
    SubPicture *sp, *sp2;
1117

    
1118
    if (is->video_st) {
1119
retry:
1120
        if (is->pictq_size == 0) {
1121
            //nothing to do, no picture to display in the queue
1122
        } else {
1123
            double time= av_gettime()/1000000.0;
1124
            double next_target;
1125
            /* dequeue the picture */
1126
            vp = &is->pictq[is->pictq_rindex];
1127

    
1128
            if(time < vp->target_clock)
1129
                return;
1130
            /* update current video pts */
1131
            is->video_current_pts = vp->pts;
1132
            is->video_current_pts_drift = is->video_current_pts - time;
1133
            is->video_current_pos = vp->pos;
1134
            if(is->pictq_size > 1){
1135
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1136
                assert(nextvp->target_clock >= vp->target_clock);
1137
                next_target= nextvp->target_clock;
1138
            }else{
1139
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1140
            }
1141
            if(framedrop && time > next_target){
1142
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1143
                if(is->pictq_size > 1 || time > next_target + 0.5){
1144
                    /* update queue size and signal for next picture */
1145
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1146
                        is->pictq_rindex = 0;
1147

    
1148
                    SDL_LockMutex(is->pictq_mutex);
1149
                    is->pictq_size--;
1150
                    SDL_CondSignal(is->pictq_cond);
1151
                    SDL_UnlockMutex(is->pictq_mutex);
1152
                    goto retry;
1153
                }
1154
            }
1155

    
1156
            if(is->subtitle_st) {
1157
                if (is->subtitle_stream_changed) {
1158
                    SDL_LockMutex(is->subpq_mutex);
1159

    
1160
                    while (is->subpq_size) {
1161
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1162

    
1163
                        /* update queue size and signal for next picture */
1164
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1165
                            is->subpq_rindex = 0;
1166

    
1167
                        is->subpq_size--;
1168
                    }
1169
                    is->subtitle_stream_changed = 0;
1170

    
1171
                    SDL_CondSignal(is->subpq_cond);
1172
                    SDL_UnlockMutex(is->subpq_mutex);
1173
                } else {
1174
                    if (is->subpq_size > 0) {
1175
                        sp = &is->subpq[is->subpq_rindex];
1176

    
1177
                        if (is->subpq_size > 1)
1178
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1179
                        else
1180
                            sp2 = NULL;
1181

    
1182
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1183
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1184
                        {
1185
                            free_subpicture(sp);
1186

    
1187
                            /* update queue size and signal for next picture */
1188
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1189
                                is->subpq_rindex = 0;
1190

    
1191
                            SDL_LockMutex(is->subpq_mutex);
1192
                            is->subpq_size--;
1193
                            SDL_CondSignal(is->subpq_cond);
1194
                            SDL_UnlockMutex(is->subpq_mutex);
1195
                        }
1196
                    }
1197
                }
1198
            }
1199

    
1200
            /* display picture */
1201
            if (!display_disable)
1202
                video_display(is);
1203

    
1204
            /* update queue size and signal for next picture */
1205
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1206
                is->pictq_rindex = 0;
1207

    
1208
            SDL_LockMutex(is->pictq_mutex);
1209
            is->pictq_size--;
1210
            SDL_CondSignal(is->pictq_cond);
1211
            SDL_UnlockMutex(is->pictq_mutex);
1212
        }
1213
    } else if (is->audio_st) {
1214
        /* draw the next audio frame */
1215

    
1216
        /* if only audio stream, then display the audio bars (better
1217
           than nothing, just to test the implementation) */
1218

    
1219
        /* display picture */
1220
        if (!display_disable)
1221
            video_display(is);
1222
    }
1223
    if (show_status) {
1224
        static int64_t last_time;
1225
        int64_t cur_time;
1226
        int aqsize, vqsize, sqsize;
1227
        double av_diff;
1228

    
1229
        cur_time = av_gettime();
1230
        if (!last_time || (cur_time - last_time) >= 30000) {
1231
            aqsize = 0;
1232
            vqsize = 0;
1233
            sqsize = 0;
1234
            if (is->audio_st)
1235
                aqsize = is->audioq.size;
1236
            if (is->video_st)
1237
                vqsize = is->videoq.size;
1238
            if (is->subtitle_st)
1239
                sqsize = is->subtitleq.size;
1240
            av_diff = 0;
1241
            if (is->audio_st && is->video_st)
1242
                av_diff = get_audio_clock(is) - get_video_clock(is);
1243
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1244
                   get_master_clock(is),
1245
                   av_diff,
1246
                   FFMAX(is->skip_frames-1, 0),
1247
                   aqsize / 1024,
1248
                   vqsize / 1024,
1249
                   sqsize,
1250
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1251
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1252
            fflush(stdout);
1253
            last_time = cur_time;
1254
        }
1255
    }
1256
}
1257

    
1258
static void stream_close(VideoState *is)
1259
{
1260
    VideoPicture *vp;
1261
    int i;
1262
    /* XXX: use a special url_shutdown call to abort parse cleanly */
1263
    is->abort_request = 1;
1264
    SDL_WaitThread(is->parse_tid, NULL);
1265
    SDL_WaitThread(is->refresh_tid, NULL);
1266

    
1267
    /* free all pictures */
1268
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1269
        vp = &is->pictq[i];
1270
#if CONFIG_AVFILTER
1271
        if (vp->picref) {
1272
            avfilter_unref_buffer(vp->picref);
1273
            vp->picref = NULL;
1274
        }
1275
#endif
1276
        if (vp->bmp) {
1277
            SDL_FreeYUVOverlay(vp->bmp);
1278
            vp->bmp = NULL;
1279
        }
1280
    }
1281
    SDL_DestroyMutex(is->pictq_mutex);
1282
    SDL_DestroyCond(is->pictq_cond);
1283
    SDL_DestroyMutex(is->subpq_mutex);
1284
    SDL_DestroyCond(is->subpq_cond);
1285
#if !CONFIG_AVFILTER
1286
    if (is->img_convert_ctx)
1287
        sws_freeContext(is->img_convert_ctx);
1288
#endif
1289
    av_free(is);
1290
}
1291

    
1292
static void do_exit(void)
1293
{
1294
    if (cur_stream) {
1295
        stream_close(cur_stream);
1296
        cur_stream = NULL;
1297
    }
1298
    uninit_opts();
1299
#if CONFIG_AVFILTER
1300
    avfilter_uninit();
1301
#endif
1302
    if (show_status)
1303
        printf("\n");
1304
    SDL_Quit();
1305
    av_log(NULL, AV_LOG_QUIET, "");
1306
    exit(0);
1307
}
1308

    
1309
/* allocate a picture (this needs to be done in the main thread to avoid
1310
   potential locking problems) */
1311
static void alloc_picture(void *opaque)
1312
{
1313
    VideoState *is = opaque;
1314
    VideoPicture *vp;
1315

    
1316
    vp = &is->pictq[is->pictq_windex];
1317

    
1318
    if (vp->bmp)
1319
        SDL_FreeYUVOverlay(vp->bmp);
1320

    
1321
#if CONFIG_AVFILTER
1322
    if (vp->picref)
1323
        avfilter_unref_buffer(vp->picref);
1324
    vp->picref = NULL;
1325

    
1326
    vp->width   = is->out_video_filter->inputs[0]->w;
1327
    vp->height  = is->out_video_filter->inputs[0]->h;
1328
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1329
#else
1330
    vp->width   = is->video_st->codec->width;
1331
    vp->height  = is->video_st->codec->height;
1332
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1333
#endif
1334

    
1335
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1336
                                   SDL_YV12_OVERLAY,
1337
                                   screen);
1338
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1339
        /* SDL allocates a buffer smaller than requested if the video
1340
         * overlay hardware is unable to support the requested size. */
1341
        fprintf(stderr, "Error: the video system does not support an image\n"
1342
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1343
                        "to reduce the image size.\n", vp->width, vp->height );
1344
        do_exit();
1345
    }
1346

    
1347
    SDL_LockMutex(is->pictq_mutex);
1348
    vp->allocated = 1;
1349
    SDL_CondSignal(is->pictq_cond);
1350
    SDL_UnlockMutex(is->pictq_mutex);
1351
}
1352

    
1353
/**
1354
 * Queue a decoded picture for later display.
1355
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1356
 */
1357
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1358
{
1359
    VideoPicture *vp;
1360

    
1361
    /* wait until we have space to put a new picture */
1362
    SDL_LockMutex(is->pictq_mutex);
1363

    
1364
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1365
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1366

    
1367
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1368
           !is->videoq.abort_request) {
1369
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1370
    }
1371
    SDL_UnlockMutex(is->pictq_mutex);
1372

    
1373
    if (is->videoq.abort_request)
1374
        return -1;
1375

    
1376
    vp = &is->pictq[is->pictq_windex];
1377

    
1378
    /* alloc or resize hardware picture buffer */
1379
    if (!vp->bmp ||
1380
#if CONFIG_AVFILTER
1381
        vp->width  != is->out_video_filter->inputs[0]->w ||
1382
        vp->height != is->out_video_filter->inputs[0]->h) {
1383
#else
1384
        vp->width != is->video_st->codec->width ||
1385
        vp->height != is->video_st->codec->height) {
1386
#endif
1387
        SDL_Event event;
1388

    
1389
        vp->allocated = 0;
1390

    
1391
        /* the allocation must be done in the main thread to avoid
1392
           locking problems */
1393
        event.type = FF_ALLOC_EVENT;
1394
        event.user.data1 = is;
1395
        SDL_PushEvent(&event);
1396

    
1397
        /* wait until the picture is allocated */
1398
        SDL_LockMutex(is->pictq_mutex);
1399
        while (!vp->allocated && !is->videoq.abort_request) {
1400
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1401
        }
1402
        SDL_UnlockMutex(is->pictq_mutex);
1403

    
1404
        if (is->videoq.abort_request)
1405
            return -1;
1406
    }
1407

    
1408
    /* if the frame is not skipped, then display it */
1409
    if (vp->bmp) {
1410
        AVPicture pict;
1411
#if CONFIG_AVFILTER
1412
        if(vp->picref)
1413
            avfilter_unref_buffer(vp->picref);
1414
        vp->picref = src_frame->opaque;
1415
#endif
1416

    
1417
        /* get a pointer on the bitmap */
1418
        SDL_LockYUVOverlay (vp->bmp);
1419

    
1420
        memset(&pict,0,sizeof(AVPicture));
1421
        pict.data[0] = vp->bmp->pixels[0];
1422
        pict.data[1] = vp->bmp->pixels[2];
1423
        pict.data[2] = vp->bmp->pixels[1];
1424

    
1425
        pict.linesize[0] = vp->bmp->pitches[0];
1426
        pict.linesize[1] = vp->bmp->pitches[2];
1427
        pict.linesize[2] = vp->bmp->pitches[1];
1428

    
1429
#if CONFIG_AVFILTER
1430
        //FIXME use direct rendering
1431
        av_picture_copy(&pict, (AVPicture *)src_frame,
1432
                        vp->pix_fmt, vp->width, vp->height);
1433
#else
1434
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1435
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1436
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1437
            PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1438
        if (is->img_convert_ctx == NULL) {
1439
            fprintf(stderr, "Cannot initialize the conversion context\n");
1440
            exit(1);
1441
        }
1442
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1443
                  0, vp->height, pict.data, pict.linesize);
1444
#endif
1445
        /* update the bitmap content */
1446
        SDL_UnlockYUVOverlay(vp->bmp);
1447

    
1448
        vp->pts = pts;
1449
        vp->pos = pos;
1450

    
1451
        /* now we can update the picture count */
1452
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1453
            is->pictq_windex = 0;
1454
        SDL_LockMutex(is->pictq_mutex);
1455
        vp->target_clock= compute_target_time(vp->pts, is);
1456

    
1457
        is->pictq_size++;
1458
        SDL_UnlockMutex(is->pictq_mutex);
1459
    }
1460
    return 0;
1461
}
1462

    
1463
/**
1464
 * compute the exact PTS for the picture if it is omitted in the stream
1465
 * @param pts1 the dts of the pkt / pts of the frame
1466
 */
1467
static int output_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1468
{
1469
    double frame_delay, pts;
1470

    
1471
    pts = pts1;
1472

    
1473
    if (pts != 0) {
1474
        /* update video clock with pts, if present */
1475
        is->video_clock = pts;
1476
    } else {
1477
        pts = is->video_clock;
1478
    }
1479
    /* update video clock for next frame */
1480
    frame_delay = av_q2d(is->video_st->codec->time_base);
1481
    /* for MPEG2, the frame can be repeated, so we update the
1482
       clock accordingly */
1483
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1484
    is->video_clock += frame_delay;
1485

    
1486
#if defined(DEBUG_SYNC) && 0
1487
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1488
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1489
#endif
1490
    return queue_picture(is, src_frame, pts, pos);
1491
}
1492

    
1493
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1494
{
1495
    int len1, got_picture, i;
1496

    
1497
    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1498
        return -1;
1499

    
1500
    if (pkt->data == flush_pkt.data) {
1501
        avcodec_flush_buffers(is->video_st->codec);
1502

    
1503
        SDL_LockMutex(is->pictq_mutex);
1504
        // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1505
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1506
            is->pictq[i].target_clock= 0;
1507
        }
1508
        while (is->pictq_size && !is->videoq.abort_request) {
1509
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1510
        }
1511
        is->video_current_pos = -1;
1512
        SDL_UnlockMutex(is->pictq_mutex);
1513

    
1514
        is->frame_last_pts = AV_NOPTS_VALUE;
1515
        is->frame_last_delay = 0;
1516
        is->frame_timer = (double)av_gettime() / 1000000.0;
1517
        is->skip_frames = 1;
1518
        is->skip_frames_index = 0;
1519
        return 0;
1520
    }
1521

    
1522
    len1 = avcodec_decode_video2(is->video_st->codec,
1523
                                 frame, &got_picture,
1524
                                 pkt);
1525

    
1526
    if (got_picture) {
1527
        if (decoder_reorder_pts == -1) {
1528
            *pts = frame->best_effort_timestamp;
1529
        } else if (decoder_reorder_pts) {
1530
            *pts = frame->pkt_pts;
1531
        } else {
1532
            *pts = frame->pkt_dts;
1533
        }
1534

    
1535
        if (*pts == AV_NOPTS_VALUE) {
1536
            *pts = 0;
1537
        }
1538

    
1539
        is->skip_frames_index += 1;
1540
        if(is->skip_frames_index >= is->skip_frames){
1541
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1542
            return 1;
1543
        }
1544

    
1545
    }
1546
    return 0;
1547
}
1548

    
1549
#if CONFIG_AVFILTER
1550
typedef struct {
1551
    VideoState *is;
1552
    AVFrame *frame;
1553
    int use_dr1;
1554
} FilterPriv;
1555

    
1556
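/* get_buffer() callback used for direct rendering into libavfilter
   buffers: the decoded frame's data pointers are mapped onto a freshly
   allocated AVFilterBufferRef, offset by the required edge width */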
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1557
{
1558
    AVFilterContext *ctx = codec->opaque;
1559
    AVFilterBufferRef  *ref;
1560
    int perms = AV_PERM_WRITE;
1561
    int i, w, h, stride[4];
1562
    unsigned edge;
1563
    int pixel_size;
1564

    
1565
    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1566

    
1567
    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1568
        perms |= AV_PERM_NEG_LINESIZES;
1569

    
1570
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1571
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1572
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1573
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1574
    }
1575
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1576

    
1577
    w = codec->width;
1578
    h = codec->height;
1579

    
1580
    if(av_image_check_size(w, h, 0, codec))
1581
        return -1;
1582

    
1583
    avcodec_align_dimensions2(codec, &w, &h, stride);
1584
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1585
    w += edge << 1;
1586
    h += edge << 1;
1587

    
1588
    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1589
        return -1;
1590

    
1591
    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1592
    ref->video->w = codec->width;
1593
    ref->video->h = codec->height;
1594
    for(i = 0; i < 4; i ++) {
1595
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1596
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1597

    
1598
        if (ref->data[i]) {
1599
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1600
        }
1601
        pic->data[i]     = ref->data[i];
1602
        pic->linesize[i] = ref->linesize[i];
1603
    }
1604
    pic->opaque = ref;
1605
    pic->age    = INT_MAX;
1606
    pic->type   = FF_BUFFER_TYPE_USER;
1607
    pic->reordered_opaque = codec->reordered_opaque;
1608
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1609
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1610
    return 0;
1611
}
1612

    
1613
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1614
{
1615
    memset(pic->data, 0, sizeof(pic->data));
1616
    avfilter_unref_buffer(pic->opaque);
1617
}
1618

    
1619
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1620
{
1621
    AVFilterBufferRef *ref = pic->opaque;
1622

    
1623
    if (pic->data[0] == NULL) {
1624
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1625
        return codec->get_buffer(codec, pic);
1626
    }
1627

    
1628
    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1629
        (codec->pix_fmt != ref->format)) {
1630
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1631
        return -1;
1632
    }
1633

    
1634
    pic->reordered_opaque = codec->reordered_opaque;
1635
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1636
    else           pic->pkt_pts = AV_NOPTS_VALUE;
1637
    return 0;
1638
}
1639

    
1640
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1641
{
1642
    FilterPriv *priv = ctx->priv;
1643
    AVCodecContext *codec;
1644
    if(!opaque) return -1;
1645

    
1646
    priv->is = opaque;
1647
    codec    = priv->is->video_st->codec;
1648
    codec->opaque = ctx;
1649
    if((codec->codec->capabilities & CODEC_CAP_DR1)
1650
    ) {
1651
        codec->flags |= CODEC_FLAG_EMU_EDGE;
1652
        priv->use_dr1 = 1;
1653
        codec->get_buffer     = input_get_buffer;
1654
        codec->release_buffer = input_release_buffer;
1655
        codec->reget_buffer   = input_reget_buffer;
1656
        codec->thread_safe_callbacks = 1;
1657
    }
1658

    
1659
    priv->frame = avcodec_alloc_frame();
1660

    
1661
    return 0;
1662
}
1663

    
1664
static void input_uninit(AVFilterContext *ctx)
1665
{
1666
    FilterPriv *priv = ctx->priv;
1667
    av_free(priv->frame);
1668
}
1669

    
1670
static int input_request_frame(AVFilterLink *link)
1671
{
1672
    FilterPriv *priv = link->src->priv;
1673
    AVFilterBufferRef *picref;
1674
    int64_t pts = 0;
1675
    AVPacket pkt;
1676
    int ret;
1677

    
1678
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1679
        av_free_packet(&pkt);
1680
    if (ret < 0)
1681
        return -1;
1682

    
1683
    if(priv->use_dr1) {
1684
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1685
    } else {
1686
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1687
        av_image_copy(picref->data, picref->linesize,
1688
                      priv->frame->data, priv->frame->linesize,
1689
                      picref->format, link->w, link->h);
1690
    }
1691
    av_free_packet(&pkt);
1692

    
1693
    picref->pts = pts;
1694
    picref->pos = pkt.pos;
1695
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1696
    avfilter_start_frame(link, picref);
1697
    avfilter_draw_slice(link, 0, link->h, 1);
1698
    avfilter_end_frame(link);
1699

    
1700
    return 0;
1701
}
1702

    
1703
static int input_query_formats(AVFilterContext *ctx)
1704
{
1705
    FilterPriv *priv = ctx->priv;
1706
    enum PixelFormat pix_fmts[] = {
1707
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1708
    };
1709

    
1710
    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1711
    return 0;
1712
}
1713

    
1714
static int input_config_props(AVFilterLink *link)
1715
{
1716
    FilterPriv *priv  = link->src->priv;
1717
    AVCodecContext *c = priv->is->video_st->codec;
1718

    
1719
    link->w = c->width;
1720
    link->h = c->height;
1721
    link->time_base = priv->is->video_st->time_base;
1722

    
1723
    return 0;
1724
}
1725

    
1726
static AVFilter input_filter =
1727
{
1728
    .name      = "ffplay_input",
1729

    
1730
    .priv_size = sizeof(FilterPriv),
1731

    
1732
    .init      = input_init,
1733
    .uninit    = input_uninit,
1734

    
1735
    .query_formats = input_query_formats,
1736

    
1737
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1738
    .outputs   = (AVFilterPad[]) {{ .name = "default",
1739
                                    .type = AVMEDIA_TYPE_VIDEO,
1740
                                    .request_frame = input_request_frame,
1741
                                    .config_props  = input_config_props, },
1742
                                  { .name = NULL }},
1743
};
1744

    
1745
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1746
{
1747
    char sws_flags_str[128];
1748
    int ret;
1749
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
1750
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
1751
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1752
    graph->scale_sws_opts = av_strdup(sws_flags_str);
1753

    
1754
    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1755
                                            NULL, is, graph)) < 0)
1756
        goto the_end;
1757
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
1758
                                            NULL, &ffsink_ctx, graph)) < 0)
1759
        goto the_end;
1760

    
1761
    if(vfilters) {
1762
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1763
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1764

    
1765
        outputs->name    = av_strdup("in");
1766
        outputs->filter_ctx = filt_src;
1767
        outputs->pad_idx = 0;
1768
        outputs->next    = NULL;
1769

    
1770
        inputs->name    = av_strdup("out");
1771
        inputs->filter_ctx = filt_out;
1772
        inputs->pad_idx = 0;
1773
        inputs->next    = NULL;
1774

    
1775
        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
1776
            goto the_end;
1777
        av_freep(&vfilters);
1778
    } else {
1779
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1780
            goto the_end;
1781
    }
1782

    
1783
    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1784
        goto the_end;
1785

    
1786
    is->out_video_filter = filt_out;
1787
the_end:
1788
    return ret;
1789
}
1790

    
1791
#endif  /* CONFIG_AVFILTER */
1792

    
1793
static int video_thread(void *arg)
1794
{
1795
    VideoState *is = arg;
1796
    AVFrame *frame= avcodec_alloc_frame();
1797
    int64_t pts_int;
1798
    double pts;
1799
    int ret;
1800

    
1801
#if CONFIG_AVFILTER
1802
    AVFilterGraph *graph = avfilter_graph_alloc();
1803
    AVFilterContext *filt_out = NULL;
1804
    int64_t pos;
1805

    
1806
    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1807
        goto the_end;
1808
    filt_out = is->out_video_filter;
1809
#endif
1810

    
1811
    for(;;) {
1812
#if !CONFIG_AVFILTER
1813
        AVPacket pkt;
1814
#else
1815
        AVFilterBufferRef *picref;
1816
        AVRational tb;
1817
#endif
1818
        while (is->paused && !is->videoq.abort_request)
1819
            SDL_Delay(10);
1820
#if CONFIG_AVFILTER
1821
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1822
        if (picref) {
1823
            pts_int = picref->pts;
1824
            pos     = picref->pos;
1825
            frame->opaque = picref;
1826
        }
1827

    
1828
        if (av_cmp_q(tb, is->video_st->time_base)) {
1829
            av_unused int64_t pts1 = pts_int;
1830
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1831
            av_dlog(NULL, "video_thread(): "
1832
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1833
                    tb.num, tb.den, pts1,
1834
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1835
        }
1836
#else
1837
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1838
#endif
1839

    
1840
        if (ret < 0) goto the_end;
1841

    
1842
        if (!ret)
1843
            continue;
1844

    
1845
        pts = pts_int*av_q2d(is->video_st->time_base);
1846

    
1847
#if CONFIG_AVFILTER
1848
        ret = output_picture(is, frame, pts, pos);
1849
#else
1850
        ret = output_picture(is, frame, pts,  pkt.pos);
1851
        av_free_packet(&pkt);
1852
#endif
1853
        if (ret < 0)
1854
            goto the_end;
1855

    
1856
        if (step)
1857
            if (cur_stream)
1858
                stream_pause(cur_stream);
1859
    }
1860
 the_end:
1861
#if CONFIG_AVFILTER
1862
    avfilter_graph_free(&graph);
1863
#endif
1864
    av_free(frame);
1865
    return 0;
1866
}
1867

    
1868
static int subtitle_thread(void *arg)
1869
{
1870
    VideoState *is = arg;
1871
    SubPicture *sp;
1872
    AVPacket pkt1, *pkt = &pkt1;
1873
    int len1, got_subtitle;
1874
    double pts;
1875
    int i, j;
1876
    int r, g, b, y, u, v, a;
1877

    
1878
    for(;;) {
1879
        while (is->paused && !is->subtitleq.abort_request) {
1880
            SDL_Delay(10);
1881
        }
1882
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1883
            break;
1884

    
1885
        if(pkt->data == flush_pkt.data){
1886
            avcodec_flush_buffers(is->subtitle_st->codec);
1887
            continue;
1888
        }
1889
        SDL_LockMutex(is->subpq_mutex);
1890
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1891
               !is->subtitleq.abort_request) {
1892
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1893
        }
1894
        SDL_UnlockMutex(is->subpq_mutex);
1895

    
1896
        if (is->subtitleq.abort_request)
1897
            goto the_end;
1898

    
1899
        sp = &is->subpq[is->subpq_windex];
1900

    
1901
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1902
           this packet, if any */
1903
        pts = 0;
1904
        if (pkt->pts != AV_NOPTS_VALUE)
1905
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1906

    
1907
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1908
                                    &sp->sub, &got_subtitle,
1909
                                    pkt);
1910
//            if (len1 < 0)
1911
//                break;
1912
        if (got_subtitle && sp->sub.format == 0) {
1913
            sp->pts = pts;
1914

    
1915
            for (i = 0; i < sp->sub.num_rects; i++)
1916
            {
1917
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1918
                {
1919
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1920
                    y = RGB_TO_Y_CCIR(r, g, b);
1921
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1922
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1923
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1924
                }
1925
            }
1926

    
1927
            /* now we can update the picture count */
1928
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1929
                is->subpq_windex = 0;
1930
            SDL_LockMutex(is->subpq_mutex);
1931
            is->subpq_size++;
1932
            SDL_UnlockMutex(is->subpq_mutex);
1933
        }
1934
        av_free_packet(pkt);
1935
//        if (step)
1936
//            if (cur_stream)
1937
//                stream_pause(cur_stream);
1938
    }
1939
 the_end:
1940
    return 0;
1941
}
1942

    
1943
/* copy samples for viewing in editor window */
1944
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1945
{
1946
    int size, len, channels;
1947

    
1948
    channels = is->audio_st->codec->channels;
1949

    
1950
    size = samples_size / sizeof(short);
1951
    while (size > 0) {
1952
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1953
        if (len > size)
1954
            len = size;
1955
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1956
        samples += len;
1957
        is->sample_array_index += len;
1958
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1959
            is->sample_array_index = 0;
1960
        size -= len;
1961
    }
1962
}
1963

    
1964
/* return the new audio buffer size (samples can be added or deleted
1965
   to get better sync if video or external master clock) */
1966
static int synchronize_audio(VideoState *is, short *samples,
1967
                             int samples_size1, double pts)
1968
{
1969
    int n, samples_size;
1970
    double ref_clock;
1971

    
1972
    n = 2 * is->audio_st->codec->channels;
1973
    samples_size = samples_size1;
1974

    
1975
    /* if not master, then we try to remove or add samples to correct the clock */
1976
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1977
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1978
        double diff, avg_diff;
1979
        int wanted_size, min_size, max_size, nb_samples;
1980

    
1981
        ref_clock = get_master_clock(is);
1982
        diff = get_audio_clock(is) - ref_clock;
1983

    
1984
        if (diff < AV_NOSYNC_THRESHOLD) {
1985
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1986
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1987
                /* not enough measures to have a correct estimate */
1988
                is->audio_diff_avg_count++;
1989
            } else {
1990
                /* estimate the A-V difference */
1991
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1992

    
1993
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1994
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1995
                    nb_samples = samples_size / n;
1996

    
1997
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1998
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1999
                    if (wanted_size < min_size)
2000
                        wanted_size = min_size;
2001
                    else if (wanted_size > max_size)
2002
                        wanted_size = max_size;
2003

    
2004
                    /* add or remove samples to correction the synchro */
2005
                    if (wanted_size < samples_size) {
2006
                        /* remove samples */
2007
                        samples_size = wanted_size;
2008
                    } else if (wanted_size > samples_size) {
2009
                        uint8_t *samples_end, *q;
2010
                        int nb;
2011

    
2012
                        /* add samples */
2013
                        nb = (samples_size - wanted_size);
2014
                        samples_end = (uint8_t *)samples + samples_size - n;
2015
                        q = samples_end + n;
2016
                        while (nb > 0) {
2017
                            memcpy(q, samples_end, n);
2018
                            q += n;
2019
                            nb -= n;
2020
                        }
2021
                        samples_size = wanted_size;
2022
                    }
2023
                }
2024
#if 0
2025
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2026
                       diff, avg_diff, samples_size - samples_size1,
2027
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2028
#endif
2029
            }
2030
        } else {
2031
            /* too big difference : may be initial PTS errors, so
2032
               reset A-V filter */
2033
            is->audio_diff_avg_count = 0;
2034
            is->audio_diff_cum = 0;
2035
        }
2036
    }
2037

    
2038
    return samples_size;
2039
}
2040

    
2041
/* decode one audio frame and returns its uncompressed size */
2042
static int audio_decode_frame(VideoState *is, double *pts_ptr)
2043
{
2044
    AVPacket *pkt_temp = &is->audio_pkt_temp;
2045
    AVPacket *pkt = &is->audio_pkt;
2046
    AVCodecContext *dec= is->audio_st->codec;
2047
    int n, len1, data_size;
2048
    double pts;
2049

    
2050
    for(;;) {
2051
        /* NOTE: the audio packet can contain several frames */
2052
        while (pkt_temp->size > 0) {
2053
            data_size = sizeof(is->audio_buf1);
2054
            len1 = avcodec_decode_audio3(dec,
2055
                                        (int16_t *)is->audio_buf1, &data_size,
2056
                                        pkt_temp);
2057
            if (len1 < 0) {
2058
                /* if error, we skip the frame */
2059
                pkt_temp->size = 0;
2060
                break;
2061
            }
2062

    
2063
            pkt_temp->data += len1;
2064
            pkt_temp->size -= len1;
2065
            if (data_size <= 0)
2066
                continue;
2067

    
2068
            if (dec->sample_fmt != is->audio_src_fmt) {
2069
                if (is->reformat_ctx)
2070
                    av_audio_convert_free(is->reformat_ctx);
2071
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2072
                                                         dec->sample_fmt, 1, NULL, 0);
2073
                if (!is->reformat_ctx) {
2074
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2075
                        av_get_sample_fmt_name(dec->sample_fmt),
2076
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2077
                        break;
2078
                }
2079
                is->audio_src_fmt= dec->sample_fmt;
2080
            }
2081

    
2082
            if (is->reformat_ctx) {
2083
                const void *ibuf[6]= {is->audio_buf1};
2084
                void *obuf[6]= {is->audio_buf2};
2085
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2086
                int ostride[6]= {2};
2087
                int len= data_size/istride[0];
2088
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2089
                    printf("av_audio_convert() failed\n");
2090
                    break;
2091
                }
2092
                is->audio_buf= is->audio_buf2;
2093
                /* FIXME: existing code assume that data_size equals framesize*channels*2
2094
                          remove this legacy cruft */
2095
                data_size= len*2;
2096
            }else{
2097
                is->audio_buf= is->audio_buf1;
2098
            }
2099

    
2100
            /* if no pts, then compute it */
2101
            pts = is->audio_clock;
2102
            *pts_ptr = pts;
2103
            n = 2 * dec->channels;
2104
            is->audio_clock += (double)data_size /
2105
                (double)(n * dec->sample_rate);
2106
#if defined(DEBUG_SYNC)
2107
            {
2108
                static double last_clock;
2109
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2110
                       is->audio_clock - last_clock,
2111
                       is->audio_clock, pts);
2112
                last_clock = is->audio_clock;
2113
            }
2114
#endif
2115
            return data_size;
2116
        }
2117

    
2118
        /* free the current packet */
2119
        if (pkt->data)
2120
            av_free_packet(pkt);
2121

    
2122
        if (is->paused || is->audioq.abort_request) {
2123
            return -1;
2124
        }
2125

    
2126
        /* read next packet */
2127
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2128
            return -1;
2129
        if(pkt->data == flush_pkt.data){
2130
            avcodec_flush_buffers(dec);
2131
            continue;
2132
        }
2133

    
2134
        pkt_temp->data = pkt->data;
2135
        pkt_temp->size = pkt->size;
2136

    
2137
        /* if update the audio clock with the pts */
2138
        if (pkt->pts != AV_NOPTS_VALUE) {
2139
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2140
        }
2141
    }
2142
}
2143

    
2144
/* get the current audio output buffer size, in samples. With SDL, we
2145
   cannot have a precise information */
2146
static int audio_write_get_buf_size(VideoState *is)
2147
{
2148
    return is->audio_buf_size - is->audio_buf_index;
2149
}
2150

    
2151

    
2152
/* prepare a new audio buffer */
2153
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2154
{
2155
    VideoState *is = opaque;
2156
    int audio_size, len1;
2157
    double pts;
2158

    
2159
    audio_callback_time = av_gettime();
2160

    
2161
    while (len > 0) {
2162
        if (is->audio_buf_index >= is->audio_buf_size) {
2163
           audio_size = audio_decode_frame(is, &pts);
2164
           if (audio_size < 0) {
2165
                /* if error, just output silence */
2166
               is->audio_buf = is->audio_buf1;
2167
               is->audio_buf_size = 1024;
2168
               memset(is->audio_buf, 0, is->audio_buf_size);
2169
           } else {
2170
               if (is->show_audio)
2171
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2172
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2173
                                              pts);
2174
               is->audio_buf_size = audio_size;
2175
           }
2176
           is->audio_buf_index = 0;
2177
        }
2178
        len1 = is->audio_buf_size - is->audio_buf_index;
2179
        if (len1 > len)
2180
            len1 = len;
2181
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2182
        len -= len1;
2183
        stream += len1;
2184
        is->audio_buf_index += len1;
2185
    }
2186
}
2187

    
2188
/* open a given stream. Return 0 if OK */
2189
static int stream_component_open(VideoState *is, int stream_index)
2190
{
2191
    AVFormatContext *ic = is->ic;
2192
    AVCodecContext *avctx;
2193
    AVCodec *codec;
2194
    SDL_AudioSpec wanted_spec, spec;
2195

    
2196
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2197
        return -1;
2198
    avctx = ic->streams[stream_index]->codec;
2199

    
2200
    /* prepare audio output */
2201
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2202
        if (avctx->channels > 0) {
2203
            avctx->request_channels = FFMIN(2, avctx->channels);
2204
        } else {
2205
            avctx->request_channels = 2;
2206
        }
2207
    }
2208

    
2209
    codec = avcodec_find_decoder(avctx->codec_id);
2210
    avctx->debug_mv = debug_mv;
2211
    avctx->debug = debug;
2212
    avctx->workaround_bugs = workaround_bugs;
2213
    avctx->lowres = lowres;
2214
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2215
    avctx->idct_algo= idct;
2216
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2217
    avctx->skip_frame= skip_frame;
2218
    avctx->skip_idct= skip_idct;
2219
    avctx->skip_loop_filter= skip_loop_filter;
2220
    avctx->error_recognition= error_recognition;
2221
    avctx->error_concealment= error_concealment;
2222
    avctx->thread_count= thread_count;
2223

    
2224
    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2225

    
2226
    if (!codec ||
2227
        avcodec_open(avctx, codec) < 0)
2228
        return -1;
2229

    
2230
    /* prepare audio output */
2231
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2232
        wanted_spec.freq = avctx->sample_rate;
2233
        wanted_spec.format = AUDIO_S16SYS;
2234
        wanted_spec.channels = avctx->channels;
2235
        wanted_spec.silence = 0;
2236
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2237
        wanted_spec.callback = sdl_audio_callback;
2238
        wanted_spec.userdata = is;
2239
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2240
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2241
            return -1;
2242
        }
2243
        is->audio_hw_buf_size = spec.size;
2244
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2245
    }
2246

    
2247
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2248
    switch(avctx->codec_type) {
2249
    case AVMEDIA_TYPE_AUDIO:
2250
        is->audio_stream = stream_index;
2251
        is->audio_st = ic->streams[stream_index];
2252
        is->audio_buf_size = 0;
2253
        is->audio_buf_index = 0;
2254

    
2255
        /* init averaging filter */
2256
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2257
        is->audio_diff_avg_count = 0;
2258
        /* since we do not have a precise anough audio fifo fullness,
2259
           we correct audio sync only if larger than this threshold */
2260
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2261

    
2262
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2263
        packet_queue_init(&is->audioq);
2264
        SDL_PauseAudio(0);
2265
        break;
2266
    case AVMEDIA_TYPE_VIDEO:
2267
        is->video_stream = stream_index;
2268
        is->video_st = ic->streams[stream_index];
2269

    
2270
//        is->video_current_pts_time = av_gettime();
2271

    
2272
        packet_queue_init(&is->videoq);
2273
        is->video_tid = SDL_CreateThread(video_thread, is);
2274
        break;
2275
    case AVMEDIA_TYPE_SUBTITLE:
2276
        is->subtitle_stream = stream_index;
2277
        is->subtitle_st = ic->streams[stream_index];
2278
        packet_queue_init(&is->subtitleq);
2279

    
2280
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2281
        break;
2282
    default:
2283
        break;
2284
    }
2285
    return 0;
2286
}
2287

    
2288
static void stream_component_close(VideoState *is, int stream_index)
2289
{
2290
    AVFormatContext *ic = is->ic;
2291
    AVCodecContext *avctx;
2292

    
2293
    if (stream_index < 0 || stream_index >= ic->nb_streams)
2294
        return;
2295
    avctx = ic->streams[stream_index]->codec;
2296

    
2297
    switch(avctx->codec_type) {
2298
    case AVMEDIA_TYPE_AUDIO:
2299
        packet_queue_abort(&is->audioq);
2300

    
2301
        SDL_CloseAudio();
2302

    
2303
        packet_queue_end(&is->audioq);
2304
        if (is->reformat_ctx)
2305
            av_audio_convert_free(is->reformat_ctx);
2306
        is->reformat_ctx = NULL;
2307
        break;
2308
    case AVMEDIA_TYPE_VIDEO:
2309
        packet_queue_abort(&is->videoq);
2310

    
2311
        /* note: we also signal this mutex to make sure we deblock the
2312
           video thread in all cases */
2313
        SDL_LockMutex(is->pictq_mutex);
2314
        SDL_CondSignal(is->pictq_cond);
2315
        SDL_UnlockMutex(is->pictq_mutex);
2316

    
2317
        SDL_WaitThread(is->video_tid, NULL);
2318

    
2319
        packet_queue_end(&is->videoq);
2320
        break;
2321
    case AVMEDIA_TYPE_SUBTITLE:
2322
        packet_queue_abort(&is->subtitleq);
2323

    
2324
        /* note: we also signal this mutex to make sure we deblock the
2325
           video thread in all cases */
2326
        SDL_LockMutex(is->subpq_mutex);
2327
        is->subtitle_stream_changed = 1;
2328

    
2329
        SDL_CondSignal(is->subpq_cond);
2330
        SDL_UnlockMutex(is->subpq_mutex);
2331

    
2332
        SDL_WaitThread(is->subtitle_tid, NULL);
2333

    
2334
        packet_queue_end(&is->subtitleq);
2335
        break;
2336
    default:
2337
        break;
2338
    }
2339

    
2340
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
2341
    avcodec_close(avctx);
2342
    switch(avctx->codec_type) {
2343
    case AVMEDIA_TYPE_AUDIO:
2344
        is->audio_st = NULL;
2345
        is->audio_stream = -1;
2346
        break;
2347
    case AVMEDIA_TYPE_VIDEO:
2348
        is->video_st = NULL;
2349
        is->video_stream = -1;
2350
        break;
2351
    case AVMEDIA_TYPE_SUBTITLE:
2352
        is->subtitle_st = NULL;
2353
        is->subtitle_stream = -1;
2354
        break;
2355
    default:
2356
        break;
2357
    }
2358
}
2359

    
2360
/* since we have only one decoding thread, we can use a global
2361
   variable instead of a thread local variable */
2362
static VideoState *global_video_state;
2363

    
2364
static int decode_interrupt_cb(void)
2365
{
2366
    return (global_video_state && global_video_state->abort_request);
2367
}
2368

    
2369
/* this thread gets the stream from the disk or the network */
2370
static int decode_thread(void *arg)
2371
{
2372
    VideoState *is = arg;
2373
    AVFormatContext *ic;
2374
    int err, i, ret;
2375
    int st_index[AVMEDIA_TYPE_NB];
2376
    AVPacket pkt1, *pkt = &pkt1;
2377
    AVFormatParameters params, *ap = &params;
2378
    int eof=0;
2379
    int pkt_in_play_range = 0;
2380

    
2381
    ic = avformat_alloc_context();
2382

    
2383
    memset(st_index, -1, sizeof(st_index));
2384
    is->video_stream = -1;
2385
    is->audio_stream = -1;
2386
    is->subtitle_stream = -1;
2387

    
2388
    global_video_state = is;
2389
    avio_set_interrupt_cb(decode_interrupt_cb);
2390

    
2391
    memset(ap, 0, sizeof(*ap));
2392

    
2393
    ap->prealloced_context = 1;
2394
    ap->width = frame_width;
2395
    ap->height= frame_height;
2396
    ap->time_base= (AVRational){1, 25};
2397
    ap->pix_fmt = frame_pix_fmt;
2398

    
2399
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2400

    
2401
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2402
    if (err < 0) {
2403
        print_error(is->filename, err);
2404
        ret = -1;
2405
        goto fail;
2406
    }
2407
    is->ic = ic;
2408

    
2409
    if(genpts)
2410
        ic->flags |= AVFMT_FLAG_GENPTS;
2411

    
2412
    err = av_find_stream_info(ic);
2413
    if (err < 0) {
2414
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2415
        ret = -1;
2416
        goto fail;
2417
    }
2418
    if(ic->pb)
2419
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2420

    
2421
    if(seek_by_bytes<0)
2422
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2423

    
2424
    /* if seeking requested, we execute it */
2425
    if (start_time != AV_NOPTS_VALUE) {
2426
        int64_t timestamp;
2427

    
2428
        timestamp = start_time;
2429
        /* add the stream start time */
2430
        if (ic->start_time != AV_NOPTS_VALUE)
2431
            timestamp += ic->start_time;
2432
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2433
        if (ret < 0) {
2434
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2435
                    is->filename, (double)timestamp / AV_TIME_BASE);
2436
        }
2437
    }
2438

    
2439
    for (i = 0; i < ic->nb_streams; i++)
2440
        ic->streams[i]->discard = AVDISCARD_ALL;
2441
    if (!video_disable)
2442
        st_index[AVMEDIA_TYPE_VIDEO] =
2443
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2444
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2445
    if (!audio_disable)
2446
        st_index[AVMEDIA_TYPE_AUDIO] =
2447
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2448
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
2449
                                st_index[AVMEDIA_TYPE_VIDEO],
2450
                                NULL, 0);
2451
    if (!video_disable)
2452
        st_index[AVMEDIA_TYPE_SUBTITLE] =
2453
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2454
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2455
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2456
                                 st_index[AVMEDIA_TYPE_AUDIO] :
2457
                                 st_index[AVMEDIA_TYPE_VIDEO]),
2458
                                NULL, 0);
2459
    if (show_status) {
2460
        av_dump_format(ic, 0, is->filename, 0);
2461
    }
2462

    
2463
    /* open the streams */
2464
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2465
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2466
    }
2467

    
2468
    ret=-1;
2469
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2470
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2471
    }
2472
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2473
    if(ret<0) {
2474
        if (!display_disable)
2475
            is->show_audio = 2;
2476
    }
2477

    
2478
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2479
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2480
    }
2481

    
2482
    if (is->video_stream < 0 && is->audio_stream < 0) {
2483
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2484
        ret = -1;
2485
        goto fail;
2486
    }
2487

    
2488
    for(;;) {
2489
        if (is->abort_request)
2490
            break;
2491
        if (is->paused != is->last_paused) {
2492
            is->last_paused = is->paused;
2493
            if (is->paused)
2494
                is->read_pause_return= av_read_pause(ic);
2495
            else
2496
                av_read_play(ic);
2497
        }
2498
#if CONFIG_RTSP_DEMUXER
2499
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2500
            /* wait 10 ms to avoid trying to get another packet */
2501
            /* XXX: horrible */
2502
            SDL_Delay(10);
2503
            continue;
2504
        }
2505
#endif
2506
        if (is->seek_req) {
2507
            int64_t seek_target= is->seek_pos;
2508
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2509
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2510
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2511
//      of the seek_pos/seek_rel variables
2512

    
2513
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2514
            if (ret < 0) {
2515
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2516
            }else{
2517
                if (is->audio_stream >= 0) {
2518
                    packet_queue_flush(&is->audioq);
2519
                    packet_queue_put(&is->audioq, &flush_pkt);
2520
                }
2521
                if (is->subtitle_stream >= 0) {
2522
                    packet_queue_flush(&is->subtitleq);
2523
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2524
                }
2525
                if (is->video_stream >= 0) {
2526
                    packet_queue_flush(&is->videoq);
2527
                    packet_queue_put(&is->videoq, &flush_pkt);
2528
                }
2529
            }
2530
            is->seek_req = 0;
2531
            eof= 0;
2532
        }
2533

    
2534
        /* if the queue are full, no need to read more */
2535
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2536
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2537
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2538
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2539
            /* wait 10 ms */
2540
            SDL_Delay(10);
2541
            continue;
2542
        }
2543
        if(eof) {
2544
            if(is->video_stream >= 0){
2545
                av_init_packet(pkt);
2546
                pkt->data=NULL;
2547
                pkt->size=0;
2548
                pkt->stream_index= is->video_stream;
2549
                packet_queue_put(&is->videoq, pkt);
2550
            }
2551
            SDL_Delay(10);
2552
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2553
                if(loop!=1 && (!loop || --loop)){
2554
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2555
                }else if(autoexit){
2556
                    ret=AVERROR_EOF;
2557
                    goto fail;
2558
                }
2559
            }
2560
            eof=0;
2561
            continue;
2562
        }
2563
        ret = av_read_frame(ic, pkt);
2564
        if (ret < 0) {
2565
            if (ret == AVERROR_EOF || url_feof(ic->pb))
2566
                eof=1;
2567
            if (ic->pb && ic->pb->error)
2568
                break;
2569
            SDL_Delay(100); /* wait for user event */
2570
            continue;
2571
        }
2572
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2573
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2574
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2575
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2576
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2577
                <= ((double)duration/1000000);
2578
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2579
            packet_queue_put(&is->audioq, pkt);
2580
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2581
            packet_queue_put(&is->videoq, pkt);
2582
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2583
            packet_queue_put(&is->subtitleq, pkt);
2584
        } else {
2585
            av_free_packet(pkt);
2586
        }
2587
    }
2588
    /* wait until the end */
2589
    while (!is->abort_request) {
2590
        SDL_Delay(100);
2591
    }
2592

    
2593
    ret = 0;
2594
 fail:
2595
    /* disable interrupting */
2596
    global_video_state = NULL;
2597

    
2598
    /* close each stream */
2599
    if (is->audio_stream >= 0)
2600
        stream_component_close(is, is->audio_stream);
2601
    if (is->video_stream >= 0)
2602
        stream_component_close(is, is->video_stream);
2603
    if (is->subtitle_stream >= 0)
2604
        stream_component_close(is, is->subtitle_stream);
2605
    if (is->ic) {
2606
        av_close_input_file(is->ic);
2607
        is->ic = NULL; /* safety */
2608
    }
2609
    avio_set_interrupt_cb(NULL);
2610

    
2611
    if (ret != 0) {
2612
        SDL_Event event;
2613

    
2614
        event.type = FF_QUIT_EVENT;
2615
        event.user.data1 = is;
2616
        SDL_PushEvent(&event);
2617
    }
2618
    return 0;
2619
}
2620

    
2621
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2622
{
2623
    VideoState *is;
2624

    
2625
    is = av_mallocz(sizeof(VideoState));
2626
    if (!is)
2627
        return NULL;
2628
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2629
    is->iformat = iformat;
2630
    is->ytop = 0;
2631
    is->xleft = 0;
2632

    
2633
    /* start video display */
2634
    is->pictq_mutex = SDL_CreateMutex();
2635
    is->pictq_cond = SDL_CreateCond();
2636

    
2637
    is->subpq_mutex = SDL_CreateMutex();
2638
    is->subpq_cond = SDL_CreateCond();
2639

    
2640
    is->av_sync_type = av_sync_type;
2641
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2642
    if (!is->parse_tid) {
2643
        av_free(is);
2644
        return NULL;
2645
    }
2646
    return is;
2647
}
2648

    
2649
static void stream_cycle_channel(VideoState *is, int codec_type)
2650
{
2651
    AVFormatContext *ic = is->ic;
2652
    int start_index, stream_index;
2653
    AVStream *st;
2654

    
2655
    if (codec_type == AVMEDIA_TYPE_VIDEO)
2656
        start_index = is->video_stream;
2657
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
2658
        start_index = is->audio_stream;
2659
    else
2660
        start_index = is->subtitle_stream;
2661
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2662
        return;
2663
    stream_index = start_index;
2664
    for(;;) {
2665
        if (++stream_index >= is->ic->nb_streams)
2666
        {
2667
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2668
            {
2669
                stream_index = -1;
2670
                goto the_end;
2671
            } else
2672
                stream_index = 0;
2673
        }
2674
        if (stream_index == start_index)
2675
            return;
2676
        st = ic->streams[stream_index];
2677
        if (st->codec->codec_type == codec_type) {
2678
            /* check that parameters are OK */
2679
            switch(codec_type) {
2680
            case AVMEDIA_TYPE_AUDIO:
2681
                if (st->codec->sample_rate != 0 &&
2682
                    st->codec->channels != 0)
2683
                    goto the_end;
2684
                break;
2685
            case AVMEDIA_TYPE_VIDEO:
2686
            case AVMEDIA_TYPE_SUBTITLE:
2687
                goto the_end;
2688
            default:
2689
                break;
2690
            }
2691
        }
2692
    }
2693
 the_end:
2694
    stream_component_close(is, start_index);
2695
    stream_component_open(is, stream_index);
2696
}
2697

    
2698

    
2699
static void toggle_full_screen(void)
2700
{
2701
    is_full_screen = !is_full_screen;
2702
    if (!fs_screen_width) {
2703
        /* use default SDL method */
2704
//        SDL_WM_ToggleFullScreen(screen);
2705
    }
2706
    video_open(cur_stream);
2707
}
2708

    
2709
static void toggle_pause(void)
2710
{
2711
    if (cur_stream)
2712
        stream_pause(cur_stream);
2713
    step = 0;
2714
}
2715

    
2716
static void step_to_next_frame(void)
2717
{
2718
    if (cur_stream) {
2719
        /* if the stream is paused unpause it, then step */
2720
        if (cur_stream->paused)
2721
            stream_pause(cur_stream);
2722
    }
2723
    step = 1;
2724
}
2725

    
2726
static void toggle_audio_display(void)
2727
{
2728
    if (cur_stream) {
2729
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2730
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2731
        fill_rectangle(screen,
2732
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2733
                    bgcolor);
2734
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2735
    }
2736
}
2737

    
2738
/* handle an event sent by the GUI */
2739
static void event_loop(void)
2740
{
2741
    SDL_Event event;
2742
    double incr, pos, frac;
2743

    
2744
    for(;;) {
2745
        double x;
2746
        SDL_WaitEvent(&event);
2747
        switch(event.type) {
2748
        case SDL_KEYDOWN:
2749
            if (exit_on_keydown) {
2750
                do_exit();
2751
                break;
2752
            }
2753
            switch(event.key.keysym.sym) {
2754
            case SDLK_ESCAPE:
2755
            case SDLK_q:
2756
                do_exit();
2757
                break;
2758
            case SDLK_f:
2759
                toggle_full_screen();
2760
                break;
2761
            case SDLK_p:
2762
            case SDLK_SPACE:
2763
                toggle_pause();
2764
                break;
2765
            case SDLK_s: //S: Step to next frame
2766
                step_to_next_frame();
2767
                break;
2768
            case SDLK_a:
2769
                if (cur_stream)
2770
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2771
                break;
2772
            case SDLK_v:
2773
                if (cur_stream)
2774
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2775
                break;
2776
            case SDLK_t:
2777
                if (cur_stream)
2778
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2779
                break;
2780
            case SDLK_w:
2781
                toggle_audio_display();
2782
                break;
2783
            case SDLK_LEFT:
2784
                incr = -10.0;
2785
                goto do_seek;
2786
            case SDLK_RIGHT:
2787
                incr = 10.0;
2788
                goto do_seek;
2789
            case SDLK_UP:
2790
                incr = 60.0;
2791
                goto do_seek;
2792
            case SDLK_DOWN:
2793
                incr = -60.0;
2794
            do_seek:
2795
                if (cur_stream) {
2796
                    if (seek_by_bytes) {
2797
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2798
                            pos= cur_stream->video_current_pos;
2799
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2800
                            pos= cur_stream->audio_pkt.pos;
2801
                        }else
2802
                            pos = avio_tell(cur_stream->ic->pb);
2803
                        if (cur_stream->ic->bit_rate)
2804
                            incr *= cur_stream->ic->bit_rate / 8.0;
2805
                        else
2806
                            incr *= 180000.0;
2807
                        pos += incr;
2808
                        stream_seek(cur_stream, pos, incr, 1);
2809
                    } else {
2810
                        pos = get_master_clock(cur_stream);
2811
                        pos += incr;
2812
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2813
                    }
2814
                }
2815
                break;
2816
            default:
2817
                break;
2818
            }
2819
            break;
2820
        case SDL_MOUSEBUTTONDOWN:
2821
            if (exit_on_mousedown) {
2822
                do_exit();
2823
                break;
2824
            }
2825
        case SDL_MOUSEMOTION:
2826
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2827
                x= event.button.x;
2828
            }else{
2829
                if(event.motion.state != SDL_PRESSED)
2830
                    break;
2831
                x= event.motion.x;
2832
            }
2833
            if (cur_stream) {
2834
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2835
                    uint64_t size=  avio_size(cur_stream->ic->pb);
2836
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2837
                }else{
2838
                    int64_t ts;
2839
                    int ns, hh, mm, ss;
2840
                    int tns, thh, tmm, tss;
2841
                    tns = cur_stream->ic->duration/1000000LL;
2842
                    thh = tns/3600;
2843
                    tmm = (tns%3600)/60;
2844
                    tss = (tns%60);
2845
                    frac = x/cur_stream->width;
2846
                    ns = frac*tns;
2847
                    hh = ns/3600;
2848
                    mm = (ns%3600)/60;
2849
                    ss = (ns%60);
2850
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2851
                            hh, mm, ss, thh, tmm, tss);
2852
                    ts = frac*cur_stream->ic->duration;
2853
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2854
                        ts += cur_stream->ic->start_time;
2855
                    stream_seek(cur_stream, ts, 0, 0);
2856
                }
2857
            }
2858
            break;
2859
        case SDL_VIDEORESIZE:
2860
            if (cur_stream) {
2861
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2862
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2863
                screen_width = cur_stream->width = event.resize.w;
2864
                screen_height= cur_stream->height= event.resize.h;
2865
            }
2866
            break;
2867
        case SDL_QUIT:
2868
        case FF_QUIT_EVENT:
2869
            do_exit();
2870
            break;
2871
        case FF_ALLOC_EVENT:
2872
            video_open(event.user.data1);
2873
            alloc_picture(event.user.data1);
2874
            break;
2875
        case FF_REFRESH_EVENT:
2876
            video_refresh_timer(event.user.data1);
2877
            cur_stream->refresh=0;
2878
            break;
2879
        default:
2880
            break;
2881
        }
2882
    }
2883
}
2884

    
2885
static void opt_frame_size(const char *arg)
2886
{
2887
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2888
        fprintf(stderr, "Incorrect frame size\n");
2889
        exit(1);
2890
    }
2891
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2892
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2893
        exit(1);
2894
    }
2895
}
2896

    
2897
static int opt_width(const char *opt, const char *arg)
2898
{
2899
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2900
    return 0;
2901
}
2902

    
2903
static int opt_height(const char *opt, const char *arg)
2904
{
2905
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2906
    return 0;
2907
}
2908

    
2909
static void opt_format(const char *arg)
2910
{
2911
    file_iformat = av_find_input_format(arg);
2912
    if (!file_iformat) {
2913
        fprintf(stderr, "Unknown input format: %s\n", arg);
2914
        exit(1);
2915
    }
2916
}
2917

    
2918
static void opt_frame_pix_fmt(const char *arg)
2919
{
2920
    frame_pix_fmt = av_get_pix_fmt(arg);
2921
}
2922

    
2923
static int opt_sync(const char *opt, const char *arg)
2924
{
2925
    if (!strcmp(arg, "audio"))
2926
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2927
    else if (!strcmp(arg, "video"))
2928
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2929
    else if (!strcmp(arg, "ext"))
2930
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2931
    else {
2932
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2933
        exit(1);
2934
    }
2935
    return 0;
2936
}
2937

    
2938
static int opt_seek(const char *opt, const char *arg)
2939
{
2940
    start_time = parse_time_or_die(opt, arg, 1);
2941
    return 0;
2942
}
2943

    
2944
static int opt_duration(const char *opt, const char *arg)
2945
{
2946
    duration = parse_time_or_die(opt, arg, 1);
2947
    return 0;
2948
}
2949

    
2950
static int opt_debug(const char *opt, const char *arg)
2951
{
2952
    av_log_set_level(99);
2953
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2954
    return 0;
2955
}
2956

    
2957
static int opt_vismv(const char *opt, const char *arg)
2958
{
2959
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2960
    return 0;
2961
}
2962

    
2963
static int opt_thread_count(const char *opt, const char *arg)
2964
{
2965
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2966
#if !HAVE_THREADS
2967
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2968
#endif
2969
    return 0;
2970
}
2971

    
2972
static const OptionDef options[] = {
2973
#include "cmdutils_common_opts.h"
2974
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2975
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2976
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2977
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2978
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2979
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2980
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2981
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2982
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2983
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2984
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2985
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2986
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2987
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2988
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2989
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2990
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2991
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2992
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2993
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2994
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2995
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2996
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2997
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2998
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2999
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3000
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3001
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3002
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3003
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3004
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3005
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3006
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3007
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3008
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3009
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3010
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3011
#if CONFIG_AVFILTER
3012
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3013
#endif
3014
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3015
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3016
    { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
3017
    { NULL, },
3018
};
3019

    
3020
static void show_usage(void)
3021
{
3022
    printf("Simple media player\n");
3023
    printf("usage: ffplay [options] input_file\n");
3024
    printf("\n");
3025
}
3026

    
3027
static void show_help(void)
3028
{
3029
    av_log_set_callback(log_callback_help);
3030
    show_usage();
3031
    show_help_options(options, "Main options:\n",
3032
                      OPT_EXPERT, 0);
3033
    show_help_options(options, "\nAdvanced options:\n",
3034
                      OPT_EXPERT, OPT_EXPERT);
3035
    printf("\n");
3036
    av_opt_show2(avcodec_opts[0], NULL,
3037
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3038
    printf("\n");
3039
    av_opt_show2(avformat_opts, NULL,
3040
                 AV_OPT_FLAG_DECODING_PARAM, 0);
3041
#if !CONFIG_AVFILTER
3042
    printf("\n");
3043
    av_opt_show2(sws_opts, NULL,
3044
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
3045
#endif
3046
    printf("\nWhile playing:\n"
3047
           "q, ESC              quit\n"
3048
           "f                   toggle full screen\n"
3049
           "p, SPC              pause\n"
3050
           "a                   cycle audio channel\n"
3051
           "v                   cycle video channel\n"
3052
           "t                   cycle subtitle channel\n"
3053
           "w                   show audio waves\n"
3054
           "s                   activate frame-step mode\n"
3055
           "left/right          seek backward/forward 10 seconds\n"
3056
           "down/up             seek backward/forward 1 minute\n"
3057
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
3058
           );
3059
}
3060

    
3061
static void opt_input_file(const char *filename)
3062
{
3063
    if (input_filename) {
3064
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3065
                filename, input_filename);
3066
        exit(1);
3067
    }
3068
    if (!strcmp(filename, "-"))
3069
        filename = "pipe:";
3070
    input_filename = filename;
3071
}
3072

    
3073
/* Called from the main */
3074
int main(int argc, char **argv)
3075
{
3076
    int flags;
3077

    
3078
    av_log_set_flags(AV_LOG_SKIP_REPEATED);
3079

    
3080
    /* register all codecs, demux and protocols */
3081
    avcodec_register_all();
3082
#if CONFIG_AVDEVICE
3083
    avdevice_register_all();
3084
#endif
3085
#if CONFIG_AVFILTER
3086
    avfilter_register_all();
3087
#endif
3088
    av_register_all();
3089

    
3090
    init_opts();
3091

    
3092
    show_banner();
3093

    
3094
    parse_options(argc, argv, options, opt_input_file);
3095

    
3096
    if (!input_filename) {
3097
        show_usage();
3098
        fprintf(stderr, "An input file must be specified\n");
3099
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3100
        exit(1);
3101
    }
3102

    
3103
    if (display_disable) {
3104
        video_disable = 1;
3105
    }
3106
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3107
#if !defined(__MINGW32__) && !defined(__APPLE__)
3108
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3109
#endif
3110
    if (SDL_Init (flags)) {
3111
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3112
        exit(1);
3113
    }
3114

    
3115
    if (!display_disable) {
3116
#if HAVE_SDL_VIDEO_SIZE
3117
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3118
        fs_screen_width = vi->current_w;
3119
        fs_screen_height = vi->current_h;
3120
#endif
3121
    }
3122

    
3123
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3124
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3125
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3126

    
3127
    av_init_packet(&flush_pkt);
3128
    flush_pkt.data= "FLUSH";
3129

    
3130
    cur_stream = stream_open(input_filename, file_iformat);
3131

    
3132
    event_loop();
3133

    
3134
    /* never returns */
3135

    
3136
    return 0;
3137
}