ffmpeg / ffplay.c @ 97dd1e4a

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavcore/imgutils.h"
32
#include "libavcore/parseutils.h"
33
#include "libavcore/samplefmt.h"
34
#include "libavformat/avformat.h"
35
#include "libavdevice/avdevice.h"
36
#include "libswscale/swscale.h"
37
#include "libavcodec/audioconvert.h"
38
#include "libavcodec/opt.h"
39
#include "libavcodec/avfft.h"
40

    
41
#if CONFIG_AVFILTER
42
# include "libavfilter/avfilter.h"
43
# include "libavfilter/avfiltergraph.h"
44
# include "libavfilter/graphparser.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG_SYNC
63

    
64
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
65
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
66
#define MIN_FRAMES 5
67

    
68
/* SDL audio buffer size, in samples. Should be small to have precise
69
   A/V sync as SDL does not have hardware buffer fullness info. */
70
#define SDL_AUDIO_BUFFER_SIZE 1024
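/* Illustrative figures (not from the original source): at a 44100 Hz sample
   rate, 1024 samples are roughly 23 ms of audio per SDL callback, which also
   bounds how stale the audio clock estimate derived below can become. */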
71

    
72
/* no AV sync correction is done if below the AV sync threshold */
73
#define AV_SYNC_THRESHOLD 0.01
74
/* no AV correction is done if too big error */
75
#define AV_NOSYNC_THRESHOLD 10.0
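/* Taken together (see compute_target_time() below): drift smaller than the
   sync threshold -- at least 10 ms, in practice one frame duration -- is left
   alone, and drift beyond 10 seconds is treated as unrecoverable and is not
   corrected either; only errors between those bounds are compensated. */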
76

    
77
#define FRAME_SKIP_FACTOR 0.05
78

    
79
/* maximum audio speed change to get correct sync */
80
#define SAMPLE_CORRECTION_PERCENT_MAX 10
81

    
82
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
83
#define AUDIO_DIFF_AVG_NB   20
84

    
85
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
86
#define SAMPLE_ARRAY_SIZE (2*65536)
87

    
88
static int sws_flags = SWS_BICUBIC;
89

    
90
typedef struct PacketQueue {
91
    AVPacketList *first_pkt, *last_pkt;
92
    int nb_packets;
93
    int size;
94
    int abort_request;
95
    SDL_mutex *mutex;
96
    SDL_cond *cond;
97
} PacketQueue;
98

    
99
#define VIDEO_PICTURE_QUEUE_SIZE 2
100
#define SUBPICTURE_QUEUE_SIZE 4
101

    
102
typedef struct VideoPicture {
103
    double pts;                                  ///<presentation time stamp for this picture
104
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
105
    int64_t pos;                                 ///<byte position in file
106
    SDL_Overlay *bmp;
107
    int width, height; /* source height & width */
108
    int allocated;
109
    enum PixelFormat pix_fmt;
110

    
111
#if CONFIG_AVFILTER
112
    AVFilterBufferRef *picref;
113
#endif
114
} VideoPicture;
115

    
116
typedef struct SubPicture {
117
    double pts; /* presentation time stamp for this picture */
118
    AVSubtitle sub;
119
} SubPicture;
120

    
121
enum {
122
    AV_SYNC_AUDIO_MASTER, /* default choice */
123
    AV_SYNC_VIDEO_MASTER,
124
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
125
};
126

    
127
typedef struct VideoState {
128
    SDL_Thread *parse_tid;
129
    SDL_Thread *video_tid;
130
    SDL_Thread *refresh_tid;
131
    AVInputFormat *iformat;
132
    int no_background;
133
    int abort_request;
134
    int paused;
135
    int last_paused;
136
    int seek_req;
137
    int seek_flags;
138
    int64_t seek_pos;
139
    int64_t seek_rel;
140
    int read_pause_return;
141
    AVFormatContext *ic;
142
    int dtg_active_format;
143

    
144
    int audio_stream;
145

    
146
    int av_sync_type;
147
    double external_clock; /* external clock base */
148
    int64_t external_clock_time;
149

    
150
    double audio_clock;
151
    double audio_diff_cum; /* used for AV difference average computation */
152
    double audio_diff_avg_coef;
153
    double audio_diff_threshold;
154
    int audio_diff_avg_count;
155
    AVStream *audio_st;
156
    PacketQueue audioq;
157
    int audio_hw_buf_size;
158
    /* samples output by the codec. we reserve more space for avsync
159
       compensation */
160
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
161
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
162
    uint8_t *audio_buf;
163
    unsigned int audio_buf_size; /* in bytes */
164
    int audio_buf_index; /* in bytes */
165
    AVPacket audio_pkt_temp;
166
    AVPacket audio_pkt;
167
    enum SampleFormat audio_src_fmt;
168
    AVAudioConvert *reformat_ctx;
169

    
170
    int show_audio; /* if true, display audio samples */
171
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
172
    int sample_array_index;
173
    int last_i_start;
174
    RDFTContext *rdft;
175
    int rdft_bits;
176
    FFTSample *rdft_data;
177
    int xpos;
178

    
179
    SDL_Thread *subtitle_tid;
180
    int subtitle_stream;
181
    int subtitle_stream_changed;
182
    AVStream *subtitle_st;
183
    PacketQueue subtitleq;
184
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
185
    int subpq_size, subpq_rindex, subpq_windex;
186
    SDL_mutex *subpq_mutex;
187
    SDL_cond *subpq_cond;
188

    
189
    double frame_timer;
190
    double frame_last_pts;
191
    double frame_last_delay;
192
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
193
    int video_stream;
194
    AVStream *video_st;
195
    PacketQueue videoq;
196
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
197
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
198
    int64_t video_current_pos;                   ///<current displayed file pos
199
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
200
    int pictq_size, pictq_rindex, pictq_windex;
201
    SDL_mutex *pictq_mutex;
202
    SDL_cond *pictq_cond;
203
#if !CONFIG_AVFILTER
204
    struct SwsContext *img_convert_ctx;
205
#endif
206

    
207
    //    QETimer *video_timer;
208
    char filename[1024];
209
    int width, height, xleft, ytop;
210

    
211
    PtsCorrectionContext pts_ctx;
212

    
213
#if CONFIG_AVFILTER
214
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
215
#endif
216

    
217
    float skip_frames;
218
    float skip_frames_index;
219
    int refresh;
220
} VideoState;
221

    
222
static void show_help(void);
223
static int audio_write_get_buf_size(VideoState *is);
224

    
225
/* options specified by the user */
226
static AVInputFormat *file_iformat;
227
static const char *input_filename;
228
static const char *window_title;
229
static int fs_screen_width;
230
static int fs_screen_height;
231
static int screen_width = 0;
232
static int screen_height = 0;
233
static int frame_width = 0;
234
static int frame_height = 0;
235
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
236
static int audio_disable;
237
static int video_disable;
238
static int wanted_stream[AVMEDIA_TYPE_NB]={
239
    [AVMEDIA_TYPE_AUDIO]=-1,
240
    [AVMEDIA_TYPE_VIDEO]=-1,
241
    [AVMEDIA_TYPE_SUBTITLE]=-1,
242
};
243
static int seek_by_bytes=-1;
244
static int display_disable;
245
static int show_status = 1;
246
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
247
static int64_t start_time = AV_NOPTS_VALUE;
248
static int64_t duration = AV_NOPTS_VALUE;
249
static int debug = 0;
250
static int debug_mv = 0;
251
static int step = 0;
252
static int thread_count = 1;
253
static int workaround_bugs = 1;
254
static int fast = 0;
255
static int genpts = 0;
256
static int lowres = 0;
257
static int idct = FF_IDCT_AUTO;
258
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
259
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
260
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
261
static int error_recognition = FF_ER_CAREFUL;
262
static int error_concealment = 3;
263
static int decoder_reorder_pts= -1;
264
static int autoexit;
265
static int exit_on_keydown;
266
static int exit_on_mousedown;
267
static int loop=1;
268
static int framedrop=1;
269

    
270
static int rdftspeed=20;
271
#if CONFIG_AVFILTER
272
static char *vfilters = NULL;
273
#endif
274

    
275
/* current context */
276
static int is_full_screen;
277
static VideoState *cur_stream;
278
static int64_t audio_callback_time;
279

    
280
static AVPacket flush_pkt;
281

    
282
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
283
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
284
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
285

    
286
static SDL_Surface *screen;
287

    
288
static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
289

    
290
/* packet queue handling */
291
static void packet_queue_init(PacketQueue *q)
292
{
293
    memset(q, 0, sizeof(PacketQueue));
294
    q->mutex = SDL_CreateMutex();
295
    q->cond = SDL_CreateCond();
296
    packet_queue_put(q, &flush_pkt);
297
}
298

    
299
static void packet_queue_flush(PacketQueue *q)
300
{
301
    AVPacketList *pkt, *pkt1;
302

    
303
    SDL_LockMutex(q->mutex);
304
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
305
        pkt1 = pkt->next;
306
        av_free_packet(&pkt->pkt);
307
        av_freep(&pkt);
308
    }
309
    q->last_pkt = NULL;
310
    q->first_pkt = NULL;
311
    q->nb_packets = 0;
312
    q->size = 0;
313
    SDL_UnlockMutex(q->mutex);
314
}
315

    
316
static void packet_queue_end(PacketQueue *q)
317
{
318
    packet_queue_flush(q);
319
    SDL_DestroyMutex(q->mutex);
320
    SDL_DestroyCond(q->cond);
321
}
322

    
323
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
324
{
325
    AVPacketList *pkt1;
326

    
327
    /* duplicate the packet */
328
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
329
        return -1;
330

    
331
    pkt1 = av_malloc(sizeof(AVPacketList));
332
    if (!pkt1)
333
        return -1;
334
    pkt1->pkt = *pkt;
335
    pkt1->next = NULL;
336

    
337

    
338
    SDL_LockMutex(q->mutex);
339

    
340
    if (!q->last_pkt)
341

    
342
        q->first_pkt = pkt1;
343
    else
344
        q->last_pkt->next = pkt1;
345
    q->last_pkt = pkt1;
346
    q->nb_packets++;
347
    q->size += pkt1->pkt.size + sizeof(*pkt1);
348
    /* XXX: should duplicate packet data in DV case */
349
    SDL_CondSignal(q->cond);
350

    
351
    SDL_UnlockMutex(q->mutex);
352
    return 0;
353
}
354

    
355
static void packet_queue_abort(PacketQueue *q)
356
{
357
    SDL_LockMutex(q->mutex);
358

    
359
    q->abort_request = 1;
360

    
361
    SDL_CondSignal(q->cond);
362

    
363
    SDL_UnlockMutex(q->mutex);
364
}
365

    
366
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
367
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
368
{
369
    AVPacketList *pkt1;
370
    int ret;
371

    
372
    SDL_LockMutex(q->mutex);
373

    
374
    for(;;) {
375
        if (q->abort_request) {
376
            ret = -1;
377
            break;
378
        }
379

    
380
        pkt1 = q->first_pkt;
381
        if (pkt1) {
382
            q->first_pkt = pkt1->next;
383
            if (!q->first_pkt)
384
                q->last_pkt = NULL;
385
            q->nb_packets--;
386
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
387
            *pkt = pkt1->pkt;
388
            av_free(pkt1);
389
            ret = 1;
390
            break;
391
        } else if (!block) {
392
            ret = 0;
393
            break;
394
        } else {
395
            SDL_CondWait(q->cond, q->mutex);
396
        }
397
    }
398
    SDL_UnlockMutex(q->mutex);
399
    return ret;
400
}
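/* Usage sketch (illustration only, not part of the original file): a decoder
 * thread typically reads in blocking mode and exits once the queue is aborted:
 *
 *     AVPacket pkt;
 *     if (packet_queue_get(&is->videoq, &pkt, 1) < 0)
 *         return -1;                  // aborted: shut the thread down
 *     // ... decode pkt, then av_free_packet(&pkt) ...
 */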
401

    
402
static inline void fill_rectangle(SDL_Surface *screen,
403
                                  int x, int y, int w, int h, int color)
404
{
405
    SDL_Rect rect;
406
    rect.x = x;
407
    rect.y = y;
408
    rect.w = w;
409
    rect.h = h;
410
    SDL_FillRect(screen, &rect, color);
411
}
412

    
413
#if 0
414
/* draw only the border of a rectangle */
415
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
416
{
417
    int w1, w2, h1, h2;
418

419
    /* fill the background */
420
    w1 = x;
421
    if (w1 < 0)
422
        w1 = 0;
423
    w2 = s->width - (x + w);
424
    if (w2 < 0)
425
        w2 = 0;
426
    h1 = y;
427
    if (h1 < 0)
428
        h1 = 0;
429
    h2 = s->height - (y + h);
430
    if (h2 < 0)
431
        h2 = 0;
432
    fill_rectangle(screen,
433
                   s->xleft, s->ytop,
434
                   w1, s->height,
435
                   color);
436
    fill_rectangle(screen,
437
                   s->xleft + s->width - w2, s->ytop,
438
                   w2, s->height,
439
                   color);
440
    fill_rectangle(screen,
441
                   s->xleft + w1, s->ytop,
442
                   s->width - w1 - w2, h1,
443
                   color);
444
    fill_rectangle(screen,
445
                   s->xleft + w1, s->ytop + s->height - h2,
446
                   s->width - w1 - w2, h2,
447
                   color);
448
}
449
#endif
450

    
451
#define ALPHA_BLEND(a, oldp, newp, s)\
452
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
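/* Fixed-point alpha blend: result = (oldp*(255-a) + newp*a) / 255.  The shift
   s is used when newp already holds the sum of 2^s neighbouring samples, as
   in the chroma averaging below, so the division still yields a per-pixel
   average. */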
453

    
454
#define RGBA_IN(r, g, b, a, s)\
455
{\
456
    unsigned int v = ((const uint32_t *)(s))[0];\
457
    a = (v >> 24) & 0xff;\
458
    r = (v >> 16) & 0xff;\
459
    g = (v >> 8) & 0xff;\
460
    b = v & 0xff;\
461
}
462

    
463
#define YUVA_IN(y, u, v, a, s, pal)\
464
{\
465
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
466
    a = (val >> 24) & 0xff;\
467
    y = (val >> 16) & 0xff;\
468
    u = (val >> 8) & 0xff;\
469
    v = val & 0xff;\
470
}
471

    
472
#define YUVA_OUT(d, y, u, v, a)\
473
{\
474
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
475
}
476

    
477

    
478
#define BPP 1
479

    
480
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
481
{
482
    int wrap, wrap3, width2, skip2;
483
    int y, u, v, a, u1, v1, a1, w, h;
484
    uint8_t *lum, *cb, *cr;
485
    const uint8_t *p;
486
    const uint32_t *pal;
487
    int dstx, dsty, dstw, dsth;
488

    
489
    dstw = av_clip(rect->w, 0, imgw);
490
    dsth = av_clip(rect->h, 0, imgh);
491
    dstx = av_clip(rect->x, 0, imgw - dstw);
492
    dsty = av_clip(rect->y, 0, imgh - dsth);
493
    lum = dst->data[0] + dsty * dst->linesize[0];
494
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
495
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
496

    
497
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
498
    skip2 = dstx >> 1;
499
    wrap = dst->linesize[0];
500
    wrap3 = rect->pict.linesize[0];
501
    p = rect->pict.data[0];
502
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
503

    
504
    if (dsty & 1) {
505
        lum += dstx;
506
        cb += skip2;
507
        cr += skip2;
508

    
509
        if (dstx & 1) {
510
            YUVA_IN(y, u, v, a, p, pal);
511
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
513
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
514
            cb++;
515
            cr++;
516
            lum++;
517
            p += BPP;
518
        }
519
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520
            YUVA_IN(y, u, v, a, p, pal);
521
            u1 = u;
522
            v1 = v;
523
            a1 = a;
524
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525

    
526
            YUVA_IN(y, u, v, a, p + BPP, pal);
527
            u1 += u;
528
            v1 += v;
529
            a1 += a;
530
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
532
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
533
            cb++;
534
            cr++;
535
            p += 2 * BPP;
536
            lum += 2;
537
        }
538
        if (w) {
539
            YUVA_IN(y, u, v, a, p, pal);
540
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
541
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
542
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
543
            p++;
544
            lum++;
545
        }
546
        p += wrap3 - dstw * BPP;
547
        lum += wrap - dstw - dstx;
548
        cb += dst->linesize[1] - width2 - skip2;
549
        cr += dst->linesize[2] - width2 - skip2;
550
    }
551
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
552
        lum += dstx;
553
        cb += skip2;
554
        cr += skip2;
555

    
556
        if (dstx & 1) {
557
            YUVA_IN(y, u, v, a, p, pal);
558
            u1 = u;
559
            v1 = v;
560
            a1 = a;
561
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
562
            p += wrap3;
563
            lum += wrap;
564
            YUVA_IN(y, u, v, a, p, pal);
565
            u1 += u;
566
            v1 += v;
567
            a1 += a;
568
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
570
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
571
            cb++;
572
            cr++;
573
            p += -wrap3 + BPP;
574
            lum += -wrap + 1;
575
        }
576
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
577
            YUVA_IN(y, u, v, a, p, pal);
578
            u1 = u;
579
            v1 = v;
580
            a1 = a;
581
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
582

    
583
            YUVA_IN(y, u, v, a, p + BPP, pal);
584
            u1 += u;
585
            v1 += v;
586
            a1 += a;
587
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
588
            p += wrap3;
589
            lum += wrap;
590

    
591
            YUVA_IN(y, u, v, a, p, pal);
592
            u1 += u;
593
            v1 += v;
594
            a1 += a;
595
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
596

    
597
            YUVA_IN(y, u, v, a, p + BPP, pal);
598
            u1 += u;
599
            v1 += v;
600
            a1 += a;
601
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
602

    
603
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
604
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
605

    
606
            cb++;
607
            cr++;
608
            p += -wrap3 + 2 * BPP;
609
            lum += -wrap + 2;
610
        }
611
        if (w) {
612
            YUVA_IN(y, u, v, a, p, pal);
613
            u1 = u;
614
            v1 = v;
615
            a1 = a;
616
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
            p += wrap3;
618
            lum += wrap;
619
            YUVA_IN(y, u, v, a, p, pal);
620
            u1 += u;
621
            v1 += v;
622
            a1 += a;
623
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
625
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
626
            cb++;
627
            cr++;
628
            p += -wrap3 + BPP;
629
            lum += -wrap + 1;
630
        }
631
        p += wrap3 + (wrap3 - dstw * BPP);
632
        lum += wrap + (wrap - dstw - dstx);
633
        cb += dst->linesize[1] - width2 - skip2;
634
        cr += dst->linesize[2] - width2 - skip2;
635
    }
636
    /* handle odd height */
637
    if (h) {
638
        lum += dstx;
639
        cb += skip2;
640
        cr += skip2;
641

    
642
        if (dstx & 1) {
643
            YUVA_IN(y, u, v, a, p, pal);
644
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
645
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
646
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
647
            cb++;
648
            cr++;
649
            lum++;
650
            p += BPP;
651
        }
652
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
653
            YUVA_IN(y, u, v, a, p, pal);
654
            u1 = u;
655
            v1 = v;
656
            a1 = a;
657
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
658

    
659
            YUVA_IN(y, u, v, a, p + BPP, pal);
660
            u1 += u;
661
            v1 += v;
662
            a1 += a;
663
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
664
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
665
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
666
            cb++;
667
            cr++;
668
            p += 2 * BPP;
669
            lum += 2;
670
        }
671
        if (w) {
672
            YUVA_IN(y, u, v, a, p, pal);
673
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
674
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
675
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
676
        }
677
    }
678
}
679

    
680
static void free_subpicture(SubPicture *sp)
681
{
682
    avsubtitle_free(&sp->sub);
683
}
684

    
685
static void video_image_display(VideoState *is)
686
{
687
    VideoPicture *vp;
688
    SubPicture *sp;
689
    AVPicture pict;
690
    float aspect_ratio;
691
    int width, height, x, y;
692
    SDL_Rect rect;
693
    int i;
694

    
695
    vp = &is->pictq[is->pictq_rindex];
696
    if (vp->bmp) {
697
#if CONFIG_AVFILTER
698
         if (vp->picref->video->pixel_aspect.num == 0)
699
             aspect_ratio = 0;
700
         else
701
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
702
#else
703

    
704
        /* XXX: use variable in the frame */
705
        if (is->video_st->sample_aspect_ratio.num)
706
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
707
        else if (is->video_st->codec->sample_aspect_ratio.num)
708
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
709
        else
710
            aspect_ratio = 0;
711
#endif
712
        if (aspect_ratio <= 0.0)
713
            aspect_ratio = 1.0;
714
        aspect_ratio *= (float)vp->width / (float)vp->height;
715
        /* if an active format is indicated, then it overrides the
716
           mpeg format */
717
#if 0
718
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
719
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
720
            printf("dtg_active_format=%d\n", is->dtg_active_format);
721
        }
722
#endif
723
#if 0
724
        switch(is->video_st->codec->dtg_active_format) {
725
        case FF_DTG_AFD_SAME:
726
        default:
727
            /* nothing to do */
728
            break;
729
        case FF_DTG_AFD_4_3:
730
            aspect_ratio = 4.0 / 3.0;
731
            break;
732
        case FF_DTG_AFD_16_9:
733
            aspect_ratio = 16.0 / 9.0;
734
            break;
735
        case FF_DTG_AFD_14_9:
736
            aspect_ratio = 14.0 / 9.0;
737
            break;
738
        case FF_DTG_AFD_4_3_SP_14_9:
739
            aspect_ratio = 14.0 / 9.0;
740
            break;
741
        case FF_DTG_AFD_16_9_SP_14_9:
742
            aspect_ratio = 14.0 / 9.0;
743
            break;
744
        case FF_DTG_AFD_SP_4_3:
745
            aspect_ratio = 4.0 / 3.0;
746
            break;
747
        }
748
#endif
749

    
750
        if (is->subtitle_st)
751
        {
752
            if (is->subpq_size > 0)
753
            {
754
                sp = &is->subpq[is->subpq_rindex];
755

    
756
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
757
                {
758
                    SDL_LockYUVOverlay (vp->bmp);
759

    
760
                    pict.data[0] = vp->bmp->pixels[0];
761
                    pict.data[1] = vp->bmp->pixels[2];
762
                    pict.data[2] = vp->bmp->pixels[1];
763

    
764
                    pict.linesize[0] = vp->bmp->pitches[0];
765
                    pict.linesize[1] = vp->bmp->pitches[2];
766
                    pict.linesize[2] = vp->bmp->pitches[1];
767

    
768
                    for (i = 0; i < sp->sub.num_rects; i++)
769
                        blend_subrect(&pict, sp->sub.rects[i],
770
                                      vp->bmp->w, vp->bmp->h);
771

    
772
                    SDL_UnlockYUVOverlay (vp->bmp);
773
                }
774
            }
775
        }
776

    
777

    
778
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
779
        height = is->height;
780
        width = ((int)rint(height * aspect_ratio)) & ~1;
781
        if (width > is->width) {
782
            width = is->width;
783
            height = ((int)rint(width / aspect_ratio)) & ~1;
784
        }
785
        x = (is->width - width) / 2;
786
        y = (is->height - height) / 2;
787
        if (!is->no_background) {
788
            /* fill the background */
789
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
790
        } else {
791
            is->no_background = 0;
792
        }
793
        rect.x = is->xleft + x;
794
        rect.y = is->ytop  + y;
795
        rect.w = width;
796
        rect.h = height;
797
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
798
    } else {
799
#if 0
800
        fill_rectangle(screen,
801
                       is->xleft, is->ytop, is->width, is->height,
802
                       QERGB(0x00, 0x00, 0x00));
803
#endif
804
    }
805
}
806

    
807
static inline int compute_mod(int a, int b)
808
{
809
    a = a % b;
810
    if (a >= 0)
811
        return a;
812
    else
813
        return a + b;
814
}
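/* Unlike the C '%' operator this always returns a value in [0, b): e.g.
   compute_mod(-3, 8) == 5 whereas (-3 % 8) == -3.  video_audio_display()
   below relies on this to wrap negative ring-buffer offsets into
   sample_array. */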
815

    
816
static void video_audio_display(VideoState *s)
817
{
818
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
819
    int ch, channels, h, h2, bgcolor, fgcolor;
820
    int64_t time_diff;
821
    int rdft_bits, nb_freq;
822

    
823
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
824
        ;
825
    nb_freq= 1<<(rdft_bits-1);
826

    
827
    /* compute display index : center on currently output samples */
828
    channels = s->audio_st->codec->channels;
829
    nb_display_channels = channels;
830
    if (!s->paused) {
831
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
832
        n = 2 * channels;
833
        delay = audio_write_get_buf_size(s);
834
        delay /= n;
835

    
836
        /* to be more precise, we take into account the time spent since
837
           the last buffer computation */
838
        if (audio_callback_time) {
839
            time_diff = av_gettime() - audio_callback_time;
840
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
841
        }
842

    
843
        delay += 2*data_used;
844
        if (delay < data_used)
845
            delay = data_used;
846

    
847
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
848
        if(s->show_audio==1){
849
            h= INT_MIN;
850
            for(i=0; i<1000; i+=channels){
851
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
852
                int a= s->sample_array[idx];
853
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
854
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
855
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
856
                int score= a-d;
857
                if(h<score && (b^c)<0){
858
                    h= score;
859
                    i_start= idx;
860
                }
861
            }
862
        }
863

    
864
        s->last_i_start = i_start;
865
    } else {
866
        i_start = s->last_i_start;
867
    }
868

    
869
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
870
    if(s->show_audio==1){
871
        fill_rectangle(screen,
872
                       s->xleft, s->ytop, s->width, s->height,
873
                       bgcolor);
874

    
875
        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
876

    
877
        /* total height for one channel */
878
        h = s->height / nb_display_channels;
879
        /* graph height / 2 */
880
        h2 = (h * 9) / 20;
881
        for(ch = 0;ch < nb_display_channels; ch++) {
882
            i = i_start + ch;
883
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
884
            for(x = 0; x < s->width; x++) {
885
                y = (s->sample_array[i] * h2) >> 15;
886
                if (y < 0) {
887
                    y = -y;
888
                    ys = y1 - y;
889
                } else {
890
                    ys = y1;
891
                }
892
                fill_rectangle(screen,
893
                               s->xleft + x, ys, 1, y,
894
                               fgcolor);
895
                i += channels;
896
                if (i >= SAMPLE_ARRAY_SIZE)
897
                    i -= SAMPLE_ARRAY_SIZE;
898
            }
899
        }
900

    
901
        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
902

    
903
        for(ch = 1;ch < nb_display_channels; ch++) {
904
            y = s->ytop + ch * h;
905
            fill_rectangle(screen,
906
                           s->xleft, y, s->width, 1,
907
                           fgcolor);
908
        }
909
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
910
    }else{
911
        nb_display_channels= FFMIN(nb_display_channels, 2);
912
        if(rdft_bits != s->rdft_bits){
913
            av_rdft_end(s->rdft);
914
            av_free(s->rdft_data);
915
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
916
            s->rdft_bits= rdft_bits;
917
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
918
        }
919
        {
920
            FFTSample *data[2];
921
            for(ch = 0;ch < nb_display_channels; ch++) {
922
                data[ch] = s->rdft_data + 2*nb_freq*ch;
923
                i = i_start + ch;
924
                for(x = 0; x < 2*nb_freq; x++) {
925
                    double w= (x-nb_freq)*(1.0/nb_freq);
926
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
927
                    i += channels;
928
                    if (i >= SAMPLE_ARRAY_SIZE)
929
                        i -= SAMPLE_ARRAY_SIZE;
930
                }
931
                av_rdft_calc(s->rdft, data[ch]);
932
            }
933
            //least efficient way to do this; we should of course directly access it, but it's more than fast enough
934
            for(y=0; y<s->height; y++){
935
                double w= 1/sqrt(nb_freq);
936
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
937
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
938
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
939
                a= FFMIN(a,255);
940
                b= FFMIN(b,255);
941
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
942

    
943
                fill_rectangle(screen,
944
                            s->xpos, s->height-y, 1, 1,
945
                            fgcolor);
946
            }
947
        }
948
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
949
        s->xpos++;
950
        if(s->xpos >= s->width)
951
            s->xpos= s->xleft;
952
    }
953
}
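/* Note on the spectrum mode above (added explanation): the rdft_bits loop
   picks the smallest transform with (1<<rdft_bits) >= 2*height, so
   nb_freq = 1<<(rdft_bits-1) is at least the window height and every screen
   row y maps onto its own frequency bin data[ch][2*y]. */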
954

    
955
static int video_open(VideoState *is){
956
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
957
    int w,h;
958

    
959
    if(is_full_screen) flags |= SDL_FULLSCREEN;
960
    else               flags |= SDL_RESIZABLE;
961

    
962
    if (is_full_screen && fs_screen_width) {
963
        w = fs_screen_width;
964
        h = fs_screen_height;
965
    } else if(!is_full_screen && screen_width){
966
        w = screen_width;
967
        h = screen_height;
968
#if CONFIG_AVFILTER
969
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
970
        w = is->out_video_filter->inputs[0]->w;
971
        h = is->out_video_filter->inputs[0]->h;
972
#else
973
    }else if (is->video_st && is->video_st->codec->width){
974
        w = is->video_st->codec->width;
975
        h = is->video_st->codec->height;
976
#endif
977
    } else {
978
        w = 640;
979
        h = 480;
980
    }
981
    if(screen && is->width == screen->w && screen->w == w
982
       && is->height== screen->h && screen->h == h)
983
        return 0;
984

    
985
#ifndef __APPLE__
986
    screen = SDL_SetVideoMode(w, h, 0, flags);
987
#else
988
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
989
    screen = SDL_SetVideoMode(w, h, 24, flags);
990
#endif
991
    if (!screen) {
992
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
993
        return -1;
994
    }
995
    if (!window_title)
996
        window_title = input_filename;
997
    SDL_WM_SetCaption(window_title, window_title);
998

    
999
    is->width = screen->w;
1000
    is->height = screen->h;
1001

    
1002
    return 0;
1003
}
1004

    
1005
/* display the current picture, if any */
1006
static void video_display(VideoState *is)
1007
{
1008
    if(!screen)
1009
        video_open(cur_stream);
1010
    if (is->audio_st && is->show_audio)
1011
        video_audio_display(is);
1012
    else if (is->video_st)
1013
        video_image_display(is);
1014
}
1015

    
1016
static int refresh_thread(void *opaque)
1017
{
1018
    VideoState *is= opaque;
1019
    while(!is->abort_request){
1020
        SDL_Event event;
1021
        event.type = FF_REFRESH_EVENT;
1022
        event.user.data1 = opaque;
1023
        if(!is->refresh){
1024
            is->refresh=1;
1025
            SDL_PushEvent(&event);
1026
        }
1027
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
1028
    }
1029
    return 0;
1030
}
1031

    
1032
/* get the current audio clock value */
1033
static double get_audio_clock(VideoState *is)
1034
{
1035
    double pts;
1036
    int hw_buf_size, bytes_per_sec;
1037
    pts = is->audio_clock;
1038
    hw_buf_size = audio_write_get_buf_size(is);
1039
    bytes_per_sec = 0;
1040
    if (is->audio_st) {
1041
        bytes_per_sec = is->audio_st->codec->sample_rate *
1042
            2 * is->audio_st->codec->channels;
1043
    }
1044
    if (bytes_per_sec)
1045
        pts -= (double)hw_buf_size / bytes_per_sec;
1046
    return pts;
1047
}
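/* Worked example (hypothetical numbers): for 44100 Hz stereo 16-bit audio,
   bytes_per_sec = 44100 * 2 * 2 = 176400; with 8192 bytes still queued in the
   hardware buffer the returned clock is pulled back by 8192/176400 ~= 46 ms,
   i.e. towards the pts of the sample actually being heard right now. */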
1048

    
1049
/* get the current video clock value */
1050
static double get_video_clock(VideoState *is)
1051
{
1052
    if (is->paused) {
1053
        return is->video_current_pts;
1054
    } else {
1055
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1056
    }
1057
}
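/* video_current_pts_drift stores (pts - wallclock at the last update), so the
   clock keeps advancing with real time without touching the struct: a frame
   shown with pts 12.000 s reads as 12.040 s when queried 40 ms later
   (illustrative numbers). */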
1058

    
1059
/* get the current external clock value */
1060
static double get_external_clock(VideoState *is)
1061
{
1062
    int64_t ti;
1063
    ti = av_gettime();
1064
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1065
}
1066

    
1067
/* get the current master clock value */
1068
static double get_master_clock(VideoState *is)
1069
{
1070
    double val;
1071

    
1072
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1073
        if (is->video_st)
1074
            val = get_video_clock(is);
1075
        else
1076
            val = get_audio_clock(is);
1077
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1078
        if (is->audio_st)
1079
            val = get_audio_clock(is);
1080
        else
1081
            val = get_video_clock(is);
1082
    } else {
1083
        val = get_external_clock(is);
1084
    }
1085
    return val;
1086
}
1087

    
1088
/* seek in the stream */
1089
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1090
{
1091
    if (!is->seek_req) {
1092
        is->seek_pos = pos;
1093
        is->seek_rel = rel;
1094
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1095
        if (seek_by_bytes)
1096
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1097
        is->seek_req = 1;
1098
    }
1099
}
1100

    
1101
/* pause or resume the video */
1102
static void stream_pause(VideoState *is)
1103
{
1104
    if (is->paused) {
1105
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1106
        if(is->read_pause_return != AVERROR(ENOSYS)){
1107
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1108
        }
1109
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1110
    }
1111
    is->paused = !is->paused;
1112
}
1113

    
1114
static double compute_target_time(double frame_current_pts, VideoState *is)
1115
{
1116
    double delay, sync_threshold, diff;
1117

    
1118
    /* compute nominal delay */
1119
    delay = frame_current_pts - is->frame_last_pts;
1120
    if (delay <= 0 || delay >= 10.0) {
1121
        /* if incorrect delay, use previous one */
1122
        delay = is->frame_last_delay;
1123
    } else {
1124
        is->frame_last_delay = delay;
1125
    }
1126
    is->frame_last_pts = frame_current_pts;
1127

    
1128
    /* update delay to follow master synchronisation source */
1129
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1130
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1131
        /* if video is slave, we try to correct big delays by
1132
           duplicating or deleting a frame */
1133
        diff = get_video_clock(is) - get_master_clock(is);
1134

    
1135
        /* skip or repeat frame. We take into account the
1136
           delay to compute the threshold. I still don't know
1137
           if it is the best guess */
1138
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1139
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1140
            if (diff <= -sync_threshold)
1141
                delay = 0;
1142
            else if (diff >= sync_threshold)
1143
                delay = 2 * delay;
1144
        }
1145
    }
1146
    is->frame_timer += delay;
1147
#if defined(DEBUG_SYNC)
1148
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1149
            delay, actual_delay, frame_current_pts, -diff);
1150
#endif
1151

    
1152
    return is->frame_timer;
1153
}
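/* Example of the correction above (hypothetical numbers): at 25 fps the
   nominal delay is 0.040 s, so sync_threshold = FFMAX(0.01, 0.040) = 0.040.
   If video lags the master clock by 60 ms (diff = -0.060) the delay is
   dropped to 0 to catch up; if it leads by 60 ms the delay is doubled to
   0.080 s so the next frame is shown later. */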
1154

    
1155
/* called to display each frame */
1156
static void video_refresh_timer(void *opaque)
1157
{
1158
    VideoState *is = opaque;
1159
    VideoPicture *vp;
1160

    
1161
    SubPicture *sp, *sp2;
1162

    
1163
    if (is->video_st) {
1164
retry:
1165
        if (is->pictq_size == 0) {
1166
            //nothing to do, no picture to display in the queue
1167
        } else {
1168
            double time= av_gettime()/1000000.0;
1169
            double next_target;
1170
            /* dequeue the picture */
1171
            vp = &is->pictq[is->pictq_rindex];
1172

    
1173
            if(time < vp->target_clock)
1174
                return;
1175
            /* update current video pts */
1176
            is->video_current_pts = vp->pts;
1177
            is->video_current_pts_drift = is->video_current_pts - time;
1178
            is->video_current_pos = vp->pos;
1179
            if(is->pictq_size > 1){
1180
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1181
                assert(nextvp->target_clock >= vp->target_clock);
1182
                next_target= nextvp->target_clock;
1183
            }else{
1184
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1185
            }
1186
            if(framedrop && time > next_target){
1187
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1188
                if(is->pictq_size > 1 || time > next_target + 0.5){
1189
                    /* update queue size and signal for next picture */
1190
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1191
                        is->pictq_rindex = 0;
1192

    
1193
                    SDL_LockMutex(is->pictq_mutex);
1194
                    is->pictq_size--;
1195
                    SDL_CondSignal(is->pictq_cond);
1196
                    SDL_UnlockMutex(is->pictq_mutex);
1197
                    goto retry;
1198
                }
1199
            }
1200

    
1201
            if(is->subtitle_st) {
1202
                if (is->subtitle_stream_changed) {
1203
                    SDL_LockMutex(is->subpq_mutex);
1204

    
1205
                    while (is->subpq_size) {
1206
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1207

    
1208
                        /* update queue size and signal for next picture */
1209
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1210
                            is->subpq_rindex = 0;
1211

    
1212
                        is->subpq_size--;
1213
                    }
1214
                    is->subtitle_stream_changed = 0;
1215

    
1216
                    SDL_CondSignal(is->subpq_cond);
1217
                    SDL_UnlockMutex(is->subpq_mutex);
1218
                } else {
1219
                    if (is->subpq_size > 0) {
1220
                        sp = &is->subpq[is->subpq_rindex];
1221

    
1222
                        if (is->subpq_size > 1)
1223
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1224
                        else
1225
                            sp2 = NULL;
1226

    
1227
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1228
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1229
                        {
1230
                            free_subpicture(sp);
1231

    
1232
                            /* update queue size and signal for next picture */
1233
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1234
                                is->subpq_rindex = 0;
1235

    
1236
                            SDL_LockMutex(is->subpq_mutex);
1237
                            is->subpq_size--;
1238
                            SDL_CondSignal(is->subpq_cond);
1239
                            SDL_UnlockMutex(is->subpq_mutex);
1240
                        }
1241
                    }
1242
                }
1243
            }
1244

    
1245
            /* display picture */
1246
            video_display(is);
1247

    
1248
            /* update queue size and signal for next picture */
1249
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1250
                is->pictq_rindex = 0;
1251

    
1252
            SDL_LockMutex(is->pictq_mutex);
1253
            is->pictq_size--;
1254
            SDL_CondSignal(is->pictq_cond);
1255
            SDL_UnlockMutex(is->pictq_mutex);
1256
        }
1257
    } else if (is->audio_st) {
1258
        /* draw the next audio frame */
1259

    
1260
        /* if only audio stream, then display the audio bars (better
1261
           than nothing, just to test the implementation) */
1262

    
1263
        /* display picture */
1264
        video_display(is);
1265
    }
1266
    if (show_status) {
1267
        static int64_t last_time;
1268
        int64_t cur_time;
1269
        int aqsize, vqsize, sqsize;
1270
        double av_diff;
1271

    
1272
        cur_time = av_gettime();
1273
        if (!last_time || (cur_time - last_time) >= 30000) {
1274
            aqsize = 0;
1275
            vqsize = 0;
1276
            sqsize = 0;
1277
            if (is->audio_st)
1278
                aqsize = is->audioq.size;
1279
            if (is->video_st)
1280
                vqsize = is->videoq.size;
1281
            if (is->subtitle_st)
1282
                sqsize = is->subtitleq.size;
1283
            av_diff = 0;
1284
            if (is->audio_st && is->video_st)
1285
                av_diff = get_audio_clock(is) - get_video_clock(is);
1286
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1287
                   get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->pts_ctx.num_faulty_dts, is->pts_ctx.num_faulty_pts);
1288
            fflush(stdout);
1289
            last_time = cur_time;
1290
        }
1291
    }
1292
}
1293

    
1294
static void stream_close(VideoState *is)
1295
{
1296
    VideoPicture *vp;
1297
    int i;
1298
    /* XXX: use a special url_shutdown call to abort parse cleanly */
1299
    is->abort_request = 1;
1300
    SDL_WaitThread(is->parse_tid, NULL);
1301
    SDL_WaitThread(is->refresh_tid, NULL);
1302

    
1303
    /* free all pictures */
1304
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
1305
        vp = &is->pictq[i];
1306
#if CONFIG_AVFILTER
1307
        if (vp->picref) {
1308
            avfilter_unref_buffer(vp->picref);
1309
            vp->picref = NULL;
1310
        }
1311
#endif
1312
        if (vp->bmp) {
1313
            SDL_FreeYUVOverlay(vp->bmp);
1314
            vp->bmp = NULL;
1315
        }
1316
    }
1317
    SDL_DestroyMutex(is->pictq_mutex);
1318
    SDL_DestroyCond(is->pictq_cond);
1319
    SDL_DestroyMutex(is->subpq_mutex);
1320
    SDL_DestroyCond(is->subpq_cond);
1321
#if !CONFIG_AVFILTER
1322
    if (is->img_convert_ctx)
1323
        sws_freeContext(is->img_convert_ctx);
1324
#endif
1325
    av_free(is);
1326
}
1327

    
1328
static void do_exit(void)
1329
{
1330
    if (cur_stream) {
1331
        stream_close(cur_stream);
1332
        cur_stream = NULL;
1333
    }
1334
    uninit_opts();
1335
#if CONFIG_AVFILTER
1336
    avfilter_uninit();
1337
#endif
1338
    if (show_status)
1339
        printf("\n");
1340
    SDL_Quit();
1341
    av_log(NULL, AV_LOG_QUIET, "");
1342
    exit(0);
1343
}
1344

    
1345
/* allocate a picture (this needs to be done in the main thread to avoid
1346
   potential locking problems) */
1347
static void alloc_picture(void *opaque)
1348
{
1349
    VideoState *is = opaque;
1350
    VideoPicture *vp;
1351

    
1352
    vp = &is->pictq[is->pictq_windex];
1353

    
1354
    if (vp->bmp)
1355
        SDL_FreeYUVOverlay(vp->bmp);
1356

    
1357
#if CONFIG_AVFILTER
1358
    if (vp->picref)
1359
        avfilter_unref_buffer(vp->picref);
1360
    vp->picref = NULL;
1361

    
1362
    vp->width   = is->out_video_filter->inputs[0]->w;
1363
    vp->height  = is->out_video_filter->inputs[0]->h;
1364
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1365
#else
1366
    vp->width   = is->video_st->codec->width;
1367
    vp->height  = is->video_st->codec->height;
1368
    vp->pix_fmt = is->video_st->codec->pix_fmt;
1369
#endif
1370

    
1371
    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1372
                                   SDL_YV12_OVERLAY,
1373
                                   screen);
1374
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1375
        /* SDL allocates a buffer smaller than requested if the video
1376
         * overlay hardware is unable to support the requested size. */
1377
        fprintf(stderr, "Error: the video system does not support an image\n"
1378
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1379
                        "to reduce the image size.\n", vp->width, vp->height );
1380
        do_exit();
1381
    }
1382

    
1383
    SDL_LockMutex(is->pictq_mutex);
1384
    vp->allocated = 1;
1385
    SDL_CondSignal(is->pictq_cond);
1386
    SDL_UnlockMutex(is->pictq_mutex);
1387
}
1388

    
1389
/**
1390
 *
1391
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1392
 */
1393
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1394
{
1395
    VideoPicture *vp;
1396
    int dst_pix_fmt;
1397
#if CONFIG_AVFILTER
1398
    AVPicture pict_src;
1399
#endif
1400
    /* wait until we have space to put a new picture */
1401
    SDL_LockMutex(is->pictq_mutex);
1402

    
1403
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1404
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1405

    
1406
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1407
           !is->videoq.abort_request) {
1408
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1409
    }
1410
    SDL_UnlockMutex(is->pictq_mutex);
1411

    
1412
    if (is->videoq.abort_request)
1413
        return -1;
1414

    
1415
    vp = &is->pictq[is->pictq_windex];
1416

    
1417
    /* alloc or resize hardware picture buffer */
1418
    if (!vp->bmp ||
1419
#if CONFIG_AVFILTER
1420
        vp->width  != is->out_video_filter->inputs[0]->w ||
1421
        vp->height != is->out_video_filter->inputs[0]->h) {
1422
#else
1423
        vp->width != is->video_st->codec->width ||
1424
        vp->height != is->video_st->codec->height) {
1425
#endif
1426
        SDL_Event event;
1427

    
1428
        vp->allocated = 0;
1429

    
1430
        /* the allocation must be done in the main thread to avoid
1431
           locking problems */
1432
        event.type = FF_ALLOC_EVENT;
1433
        event.user.data1 = is;
1434
        SDL_PushEvent(&event);
1435

    
1436
        /* wait until the picture is allocated */
1437
        SDL_LockMutex(is->pictq_mutex);
1438
        while (!vp->allocated && !is->videoq.abort_request) {
1439
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1440
        }
1441
        SDL_UnlockMutex(is->pictq_mutex);
1442

    
1443
        if (is->videoq.abort_request)
1444
            return -1;
1445
    }
1446

    
1447
    /* if the frame is not skipped, then display it */
1448
    if (vp->bmp) {
1449
        AVPicture pict;
1450
#if CONFIG_AVFILTER
1451
        if(vp->picref)
1452
            avfilter_unref_buffer(vp->picref);
1453
        vp->picref = src_frame->opaque;
1454
#endif
1455

    
1456
        /* get a pointer on the bitmap */
1457
        SDL_LockYUVOverlay (vp->bmp);
1458

    
1459
        dst_pix_fmt = PIX_FMT_YUV420P;
1460
        memset(&pict,0,sizeof(AVPicture));
1461
        pict.data[0] = vp->bmp->pixels[0];
1462
        pict.data[1] = vp->bmp->pixels[2];
1463
        pict.data[2] = vp->bmp->pixels[1];
1464

    
1465
        pict.linesize[0] = vp->bmp->pitches[0];
1466
        pict.linesize[1] = vp->bmp->pitches[2];
1467
        pict.linesize[2] = vp->bmp->pitches[1];
1468

    
1469
#if CONFIG_AVFILTER
1470
        pict_src.data[0] = src_frame->data[0];
1471
        pict_src.data[1] = src_frame->data[1];
1472
        pict_src.data[2] = src_frame->data[2];
1473

    
1474
        pict_src.linesize[0] = src_frame->linesize[0];
1475
        pict_src.linesize[1] = src_frame->linesize[1];
1476
        pict_src.linesize[2] = src_frame->linesize[2];
1477

    
1478
        //FIXME use direct rendering
1479
        av_picture_copy(&pict, &pict_src,
1480
                        vp->pix_fmt, vp->width, vp->height);
1481
#else
1482
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1483
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1484
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1485
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1486
        if (is->img_convert_ctx == NULL) {
1487
            fprintf(stderr, "Cannot initialize the conversion context\n");
1488
            exit(1);
1489
        }
1490
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1491
                  0, vp->height, pict.data, pict.linesize);
1492
#endif
1493
        /* update the bitmap content */
1494
        SDL_UnlockYUVOverlay(vp->bmp);
1495

    
1496
        vp->pts = pts;
1497
        vp->pos = pos;
1498

    
1499
        /* now we can update the picture count */
1500
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1501
            is->pictq_windex = 0;
1502
        SDL_LockMutex(is->pictq_mutex);
1503
        vp->target_clock= compute_target_time(vp->pts, is);
1504

    
1505
        is->pictq_size++;
1506
        SDL_UnlockMutex(is->pictq_mutex);
1507
    }
1508
    return 0;
1509
}
1510

    
1511
/**
1512
 * compute the exact PTS for the picture if it is omitted in the stream
1513
 * @param pts1 the dts of the pkt / pts of the frame
1514
 */
1515
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1516
{
1517
    double frame_delay, pts;
1518

    
1519
    pts = pts1;
1520

    
1521
    if (pts != 0) {
1522
        /* update video clock with pts, if present */
1523
        is->video_clock = pts;
1524
    } else {
1525
        pts = is->video_clock;
1526
    }
1527
    /* update video clock for next frame */
1528
    frame_delay = av_q2d(is->video_st->codec->time_base);
1529
    /* for MPEG2, the frame can be repeated, so we update the
1530
       clock accordingly */
1531
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1532
    is->video_clock += frame_delay;
1533

    
1534
#if defined(DEBUG_SYNC) && 0
1535
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1536
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1537
#endif
1538
    return queue_picture(is, src_frame, pts, pos);
1539
}
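/* Repeat-field example (illustration only): with a codec time_base of 1/25 s
   and repeat_pict = 1 (one extra field, as signalled by MPEG-2 pulldown), the
   clock advances by 0.040 + 1*0.020 = 0.060 s instead of one frame period. */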
1540

    
1541
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1542
{
1543
    int len1, got_picture, i;
1544

    
1545
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1546
            return -1;
1547

    
1548
        if(pkt->data == flush_pkt.data){
1549
            avcodec_flush_buffers(is->video_st->codec);
1550

    
1551
            SDL_LockMutex(is->pictq_mutex);
1552
            //Make sure there are no long delay timers (ideally we should just flush the queue, but that's harder)
1553
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1554
                is->pictq[i].target_clock= 0;
1555
            }
1556
            while (is->pictq_size && !is->videoq.abort_request) {
1557
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1558
            }
1559
            is->video_current_pos= -1;
1560
            SDL_UnlockMutex(is->pictq_mutex);
1561

    
1562
            init_pts_correction(&is->pts_ctx);
1563
            is->frame_last_pts= AV_NOPTS_VALUE;
1564
            is->frame_last_delay = 0;
1565
            is->frame_timer = (double)av_gettime() / 1000000.0;
1566
            is->skip_frames= 1;
1567
            is->skip_frames_index= 0;
1568
            return 0;
1569
        }
1570

    
1571
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1572
           this packet, if any */
1573
        is->video_st->codec->reordered_opaque= pkt->pts;
1574
        len1 = avcodec_decode_video2(is->video_st->codec,
1575
                                    frame, &got_picture,
1576
                                    pkt);
1577

    
1578
        if (got_picture) {
1579
            if (decoder_reorder_pts == -1) {
1580
                *pts = guess_correct_pts(&is->pts_ctx, frame->reordered_opaque, pkt->dts);
1581
            } else if (decoder_reorder_pts) {
1582
                *pts = frame->reordered_opaque;
1583
            } else {
1584
                *pts = pkt->dts;
1585
            }
1586

    
1587
            if (*pts == AV_NOPTS_VALUE) {
1588
                *pts = 0;
1589
            }
1590
        }
1591

    
1592
//            if (len1 < 0)
1593
//                break;
1594
    if (got_picture){
1595
        is->skip_frames_index += 1;
1596
        if(is->skip_frames_index >= is->skip_frames){
1597
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1598
            return 1;
1599
        }
1600

    
1601
    }
1602
    return 0;
1603
}
1604

    
1605
#if CONFIG_AVFILTER
typedef struct {
    VideoState *is;
    AVFrame *frame;
    int use_dr1;
} FilterPriv;

static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;

    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        if (ref->data[i]) {
            ref->data[i]    += (edge >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}
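
/* Illustrative sketch (not part of the player, name is hypothetical): the
 * edge-padding arithmetic used in input_get_buffer() above.  When the codec
 * cannot emulate edges, the buffer is enlarged by one edge width on every side
 * and the visible picture starts edge pixels in and edge lines down; hshift and
 * vshift are the log2 chroma subsampling factors (both 1 for PIX_FMT_YUV420P,
 * so its chroma planes are offset by edge/2 in each direction). */
#if 0
static uint8_t *plane_origin_example(uint8_t *base, int linesize, unsigned edge,
                                     unsigned hshift, unsigned vshift)
{
    return base + (edge >> hshift) + ((edge * linesize) >> vshift);
}
#endif
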
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(pic->opaque);
}

static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (pic->data[0] == NULL) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
        (codec->pix_fmt != ref->format)) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    return 0;
}

static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if(codec->codec->capabilities & CODEC_CAP_DR1) {
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
    }

    priv->frame = avcodec_alloc_frame();

    return 0;
}

static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    av_free(priv->frame);
}

static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
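
/* Illustrative sketch (not part of the player, name is hypothetical): why the
 * DR1 branch in input_request_frame() above matters.  With direct rendering the
 * decoder already wrote into a filter buffer, so forwarding the frame is just
 * taking another reference; otherwise a full picture copy is needed, roughly
 * 1920*1080*1.5 ~= 3 MB per frame for 1080p YUV 4:2:0. */
#if 0
static AVFilterBufferRef *forward_frame_example(AVFilterLink *link, AVFrame *frame,
                                                int use_dr1)
{
    AVFilterBufferRef *ref;

    if (use_dr1)                        /* frame->opaque already holds a buffer ref */
        return avfilter_ref_buffer(frame->opaque, ~0);

    /* fallback: allocate a buffer on the link and copy the planes over */
    ref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
    av_image_copy(ref->data, ref->linesize,
                  frame->data, frame->linesize,
                  ref->format, link->w, link->h);
    return ref;
}
#endif
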
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat pix_fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
    return 0;
}

static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *c = priv->is->video_st->codec;

    link->w = c->width;
    link->h = c->height;
    link->time_base = priv->is->video_st->time_base;

    return 0;
}

static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};

#endif  /* CONFIG_AVFILTER */

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVFrame *frame= avcodec_alloc_frame();
    int64_t pts_int;
    double pts;
    int ret;

#if CONFIG_AVFILTER
    int64_t pos;
    char sws_flags_str[128];
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    AVFilterGraph *graph = avfilter_graph_alloc();
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if (avfilter_open(&filt_src, &input_filter,  "src") < 0) goto the_end;
    if (avfilter_open(&filt_out, &ffsink      ,  "out") < 0) goto the_end;

    if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
    if(avfilter_init_filter(filt_out, NULL, &ffsink_ctx))    goto the_end;


    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        outputs->name    = av_strdup("in");
        outputs->filter  = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter  = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
            goto the_end;
        av_freep(&vfilters);
    } else {
        if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
    }
    avfilter_graph_add_filter(graph, filt_src);
    avfilter_graph_add_filter(graph, filt_out);

    if (avfilter_graph_config(graph, NULL) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
#endif

    for(;;) {
#if !CONFIG_AVFILTER
        AVPacket pkt;
#else
        AVFilterBufferRef *picref;
        AVRational tb;
#endif
        while (is->paused && !is->videoq.abort_request)
            SDL_Delay(10);
#if CONFIG_AVFILTER
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
        if (picref) {
            pts_int = picref->pts;
            pos     = picref->pos;
            frame->opaque = picref;
        }

        if (av_cmp_q(tb, is->video_st->time_base)) {
            int64_t pts1 = pts_int;
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
            av_log(NULL, AV_LOG_DEBUG, "video_thread(): "
                   "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
                   tb.num, tb.den, pts1,
                   is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
        }
#else
        ret = get_video_frame(is, frame, &pts_int, &pkt);
#endif

        if (ret < 0) goto the_end;

        if (!ret)
            continue;

        pts = pts_int*av_q2d(is->video_st->time_base);

#if CONFIG_AVFILTER
        ret = output_picture2(is, frame, pts, pos);
#else
        ret = output_picture2(is, frame, pts,  pkt.pos);
        av_free_packet(&pkt);
#endif
        if (ret < 0)
            goto the_end;

        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(graph);
    av_freep(&graph);
#endif
    av_free(frame);
    return 0;
}
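
/* Illustrative sketch (not part of the player, name is hypothetical): rescaling
 * a timestamp between time bases, as video_thread() does when the filter graph
 * outputs a different time base than the stream.  The numbers are an assumed
 * example only. */
#if 0
static void rescale_pts_example(void)
{
    AVRational filter_tb = { 1, 1000 };   /* e.g. milliseconds out of the graph */
    AVRational stream_tb = { 1, 90000 };  /* e.g. a 90 kHz MPEG-TS stream clock */
    int64_t pts_in  = 2000;               /* 2.000 s expressed in filter_tb */
    int64_t pts_out = av_rescale_q(pts_in, filter_tb, stream_tb);  /* -> 180000 */

    /* both values describe the same instant: pts * num / den = 2.0 seconds */
    av_log(NULL, AV_LOG_DEBUG, "%"PRId64" -> %"PRId64"\n", pts_in, pts_out);
}
#endif
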
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
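
/* Illustrative sketch (not part of the player, name is hypothetical): the
 * in-place palette conversion performed by subtitle_thread() above.  Bitmap
 * subtitles arrive with an RGBA palette; the display path wants YUVA, so each
 * palette entry is converted once with the CCIR (video range) macros.  For
 * opaque white (r = g = b = 255) this yields roughly y=235, u=v=128. */
#if 0
static uint32_t rgba_palette_entry_to_yuva_example(uint32_t rgba)
{
    int r, g, b, a, y, u, v;

    RGBA_IN(r, g, b, a, &rgba);          /* unpack one 32-bit palette entry */
    y = RGB_TO_Y_CCIR(r, g, b);          /* luma, limited range 16..235 */
    u = RGB_TO_U_CCIR(r, g, b, 0);       /* chroma centred on 128 */
    v = RGB_TO_V_CCIR(r, g, b, 0);
    YUVA_OUT(&rgba, y, u, v, a);         /* repack, alpha untouched */
    return rgba;
}
#endif
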
/* copy samples for viewing in the audio display window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the last one */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
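
/* Illustrative sketch (not part of the player, name is hypothetical): the
 * sample-count correction computed by synchronize_audio() above, with assumed
 * numbers.  For 16-bit stereo at 44100 Hz, n = 4 bytes per sample pair; an audio
 * clock 0.05 s ahead of the master asks for 0.05 * 44100 = 2205 extra sample
 * pairs (8820 bytes), but the change is clamped to within
 * SAMPLE_CORRECTION_PERCENT_MAX percent of the current buffer. */
#if 0
static int wanted_audio_bytes_example(int samples_size, int sample_rate,
                                      int channels, double diff)
{
    int n = 2 * channels;                        /* bytes per S16 sample pair */
    int wanted = samples_size + (int)(diff * sample_rate) * n;
    int nb_samples = samples_size / n;
    int min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
    int max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;

    return av_clip(wanted, min_size, max_size);
}
#endif
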
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts, if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
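
/* Illustrative sketch (not part of the player, name is hypothetical): how the
 * audio clock advances per decoded chunk in audio_decode_frame() above.  For
 * 16-bit stereo at 48000 Hz, n = 4 bytes per sample pair, so a 4096-byte chunk
 * represents 4096 / (4 * 48000) ~= 0.0213 s of audio. */
#if 0
static double audio_clock_increment_example(int data_size, int channels, int sample_rate)
{
    int n = 2 * channels;               /* bytes per S16 sample pair */

    return (double)data_size / (double)(n * sample_rate);
}
#endif
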
/* get the current audio output buffer size, in bytes. With SDL, we
   cannot get precise information about the hardware buffer fullness */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
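
/* Illustrative sketch (not part of the player, name is hypothetical): estimating
 * the currently audible position from inside the audio callback.  is->audio_clock
 * is the pts at the end of the data produced so far, so data not yet played (what
 * remains in our buffer plus, roughly, the SDL hardware buffer) is subtracted;
 * the accounting actually used by the player lives in get_audio_clock(). */
#if 0
static double audio_position_example(VideoState *is)
{
    int bytes_per_sec = is->audio_st->codec->sample_rate *
                        2 * is->audio_st->codec->channels;   /* S16 output */
    int buffered = audio_write_get_buf_size(is) + is->audio_hw_buf_size;

    return is->audio_clock - (double)buffered / bytes_per_sec;
}
#endif
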
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness measure,
           we correct audio sync only if the error is larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
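
/* Illustrative sketch (not part of the player, name is hypothetical): the
 * averaging filter initialised in stream_component_open() above.  With
 * coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) past A-V differences decay
 * geometrically, so after AUDIO_DIFF_AVG_NB measurements a sample only keeps
 * about 1% of its weight; multiplying the accumulator by (1 - coef) gives the
 * normalised moving average that synchronize_audio() compares to the threshold. */
#if 0
static double averaged_audio_diff_example(const double *diffs, int count)
{
    double coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
    double cum = 0;
    int i;

    for (i = 0; i < count; i++)
        cum = diffs[i] + coef * cum;    /* same update as synchronize_audio() */
    return cum * (1.0 - coef);          /* normalised estimate */
}
#endif
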
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
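
/* Illustrative sketch (not part of the player, name is hypothetical): how the
 * callback above is meant to be used.  Once registered with
 * url_set_interrupt_cb() (as decode_thread() does below), libavformat polls it
 * during blocking I/O, and a non-zero return makes calls such as
 * av_open_input_file() or av_read_frame() give up instead of hanging. */
#if 0
static void abort_blocking_io_example(VideoState *is)
{
    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);  /* install the poll function */

    is->abort_request = 1;       /* any blocked demuxer call now returns early */

    url_set_interrupt_cb(NULL);  /* detach again when the read thread exits */
}
#endif
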
/* this thread gets the stream from the disk or the network */
2398
static int decode_thread(void *arg)
2399
{
2400
    VideoState *is = arg;
2401
    AVFormatContext *ic;
2402
    int err, i, ret;
2403
    int st_index[AVMEDIA_TYPE_NB];
2404
    int st_count[AVMEDIA_TYPE_NB]={0};
2405
    int st_best_packet_count[AVMEDIA_TYPE_NB];
2406
    AVPacket pkt1, *pkt = &pkt1;
2407
    AVFormatParameters params, *ap = &params;
2408
    int eof=0;
2409
    int pkt_in_play_range = 0;
2410

    
2411
    ic = avformat_alloc_context();
2412

    
2413
    memset(st_index, -1, sizeof(st_index));
2414
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2415
    is->video_stream = -1;
2416
    is->audio_stream = -1;
2417
    is->subtitle_stream = -1;
2418

    
2419
    global_video_state = is;
2420
    url_set_interrupt_cb(decode_interrupt_cb);
2421

    
2422
    memset(ap, 0, sizeof(*ap));
2423

    
2424
    ap->prealloced_context = 1;
2425
    ap->width = frame_width;
2426
    ap->height= frame_height;
2427
    ap->time_base= (AVRational){1, 25};
2428
    ap->pix_fmt = frame_pix_fmt;
2429

    
2430
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2431

    
2432
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2433
    if (err < 0) {
2434
        print_error(is->filename, err);
2435
        ret = -1;
2436
        goto fail;
2437
    }
2438
    is->ic = ic;
2439

    
2440
    if(genpts)
2441
        ic->flags |= AVFMT_FLAG_GENPTS;
2442

    
2443
    err = av_find_stream_info(ic);
2444
    if (err < 0) {
2445
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2446
        ret = -1;
2447
        goto fail;
2448
    }
2449
    if(ic->pb)
2450
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2451

    
2452
    if(seek_by_bytes<0)
2453
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2454

    
2455
    /* if seeking requested, we execute it */
2456
    if (start_time != AV_NOPTS_VALUE) {
2457
        int64_t timestamp;
2458

    
2459
        timestamp = start_time;
2460
        /* add the stream start time */
2461
        if (ic->start_time != AV_NOPTS_VALUE)
2462
            timestamp += ic->start_time;
2463
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2464
        if (ret < 0) {
2465
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
2466
                    is->filename, (double)timestamp / AV_TIME_BASE);
2467
        }
2468
    }
2469

    
2470
    for(i = 0; i < ic->nb_streams; i++) {
2471
        AVStream *st= ic->streams[i];
2472
        AVCodecContext *avctx = st->codec;
2473
        ic->streams[i]->discard = AVDISCARD_ALL;
2474
        if(avctx->codec_type >= (unsigned)AVMEDIA_TYPE_NB)
2475
            continue;
2476
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2477
            continue;
2478

    
2479
        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2480
            continue;
2481
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2482

    
2483
        switch(avctx->codec_type) {
2484
        case AVMEDIA_TYPE_AUDIO:
2485
            if (!audio_disable)
2486
                st_index[AVMEDIA_TYPE_AUDIO] = i;
2487
            break;
2488
        case AVMEDIA_TYPE_VIDEO:
2489
        case AVMEDIA_TYPE_SUBTITLE:
2490
            if (!video_disable)
2491
                st_index[avctx->codec_type] = i;
2492
            break;
2493
        default:
2494
            break;
2495
        }
2496
    }
2497
    if (show_status) {
2498
        dump_format(ic, 0, is->filename, 0);
2499
    }
2500

    
2501
    /* open the streams */
2502
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2503
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2504
    }
2505

    
2506
    ret=-1;
2507
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2508
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2509
    }
2510
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2511
    if(ret<0) {
2512
        if (!display_disable)
2513
            is->show_audio = 2;
2514
    }
2515

    
2516
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2517
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2518
    }
2519

    
2520
    if (is->video_stream < 0 && is->audio_stream < 0) {
2521
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2522
        ret = -1;
2523
        goto fail;
2524
    }
2525

    
2526
    for(;;) {
2527
        if (is->abort_request)
2528
            break;
2529
        if (is->paused != is->last_paused) {
2530
            is->last_paused = is->paused;
2531
            if (is->paused)
2532
                is->read_pause_return= av_read_pause(ic);
2533
            else
2534
                av_read_play(ic);
2535
        }
2536
#if CONFIG_RTSP_DEMUXER
2537
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2538
            /* wait 10 ms to avoid trying to get another packet */
2539
            /* XXX: horrible */
2540
            SDL_Delay(10);
2541
            continue;
2542
        }
2543
#endif
2544
        if (is->seek_req) {
2545
            int64_t seek_target= is->seek_pos;
2546
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2547
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2548
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2549
//      of the seek_pos/seek_rel variables
2550

    
2551
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2552
            if (ret < 0) {
2553
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2554
            }else{
2555
                if (is->audio_stream >= 0) {
2556
                    packet_queue_flush(&is->audioq);
2557
                    packet_queue_put(&is->audioq, &flush_pkt);
2558
                }
2559
                if (is->subtitle_stream >= 0) {
2560
                    packet_queue_flush(&is->subtitleq);
2561
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2562
                }
2563
                if (is->video_stream >= 0) {
2564
                    packet_queue_flush(&is->videoq);
2565
                    packet_queue_put(&is->videoq, &flush_pkt);
2566
                }
2567
            }
2568
            is->seek_req = 0;
2569
            eof= 0;
2570
        }
2571

    
2572
        /* if the queue are full, no need to read more */
2573
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2574
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2575
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2576
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2577
            /* wait 10 ms */
2578
            SDL_Delay(10);
2579
            continue;
2580
        }
2581
        if(eof) {
2582
            if(is->video_stream >= 0){
2583
                av_init_packet(pkt);
2584
                pkt->data=NULL;
2585
                pkt->size=0;
2586
                pkt->stream_index= is->video_stream;
2587
                packet_queue_put(&is->videoq, pkt);
2588
            }
2589
            SDL_Delay(10);
2590
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2591
                if(loop!=1 && (!loop || --loop)){
2592
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2593
                }else if(autoexit){
2594
                    ret=AVERROR_EOF;
2595
                    goto fail;
2596
                }
2597
            }
2598
            continue;
2599
        }
2600
        ret = av_read_frame(ic, pkt);
2601
        if (ret < 0) {
2602
            if (ret == AVERROR_EOF || url_feof(ic->pb))
2603
                eof=1;
2604
            if (url_ferror(ic->pb))
2605
                break;
2606
            SDL_Delay(100); /* wait for user event */
2607
            continue;
2608
        }
2609
        /* check if packet is in play range specified by user, then queue, otherwise discard */
2610
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2611
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2612
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
2613
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2614
                <= ((double)duration/1000000);
2615
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2616
            packet_queue_put(&is->audioq, pkt);
2617
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2618
            packet_queue_put(&is->videoq, pkt);
2619
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2620
            packet_queue_put(&is->subtitleq, pkt);
2621
        } else {
2622
            av_free_packet(pkt);
2623
        }
2624
    }
2625
    /* wait until the end */
2626
    while (!is->abort_request) {
2627
        SDL_Delay(100);
2628
    }
2629

    
2630
    ret = 0;
2631
 fail:
2632
    /* disable interrupting */
2633
    global_video_state = NULL;
2634

    
2635
    /* close each stream */
2636
    if (is->audio_stream >= 0)
2637
        stream_component_close(is, is->audio_stream);
2638
    if (is->video_stream >= 0)
2639
        stream_component_close(is, is->video_stream);
2640
    if (is->subtitle_stream >= 0)
2641
        stream_component_close(is, is->subtitle_stream);
2642
    if (is->ic) {
2643
        av_close_input_file(is->ic);
2644
        is->ic = NULL; /* safety */
2645
    }
2646
    url_set_interrupt_cb(NULL);
2647

    
2648
    if (ret != 0) {
2649
        SDL_Event event;
2650

    
2651
        event.type = FF_QUIT_EVENT;
2652
        event.user.data1 = is;
2653
        SDL_PushEvent(&event);
2654
    }
2655
    return 0;
2656
}
2657

    
2658
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2659
{
2660
    VideoState *is;
2661

    
2662
    is = av_mallocz(sizeof(VideoState));
2663
    if (!is)
2664
        return NULL;
2665
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2666
    is->iformat = iformat;
2667
    is->ytop = 0;
2668
    is->xleft = 0;
2669

    
2670
    /* start video display */
2671
    is->pictq_mutex = SDL_CreateMutex();
2672
    is->pictq_cond = SDL_CreateCond();
2673

    
2674
    is->subpq_mutex = SDL_CreateMutex();
2675
    is->subpq_cond = SDL_CreateCond();
2676

    
2677
    is->av_sync_type = av_sync_type;
2678
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2679
    if (!is->parse_tid) {
2680
        av_free(is);
2681
        return NULL;
2682
    }
2683
    return is;
2684
}
2685

    
2686
static void stream_cycle_channel(VideoState *is, int codec_type)
2687
{
2688
    AVFormatContext *ic = is->ic;
2689
    int start_index, stream_index;
2690
    AVStream *st;
2691

    
2692
    if (codec_type == AVMEDIA_TYPE_VIDEO)
2693
        start_index = is->video_stream;
2694
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
2695
        start_index = is->audio_stream;
2696
    else
2697
        start_index = is->subtitle_stream;
2698
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2699
        return;
2700
    stream_index = start_index;
2701
    for(;;) {
2702
        if (++stream_index >= is->ic->nb_streams)
2703
        {
2704
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2705
            {
2706
                stream_index = -1;
2707
                goto the_end;
2708
            } else
2709
                stream_index = 0;
2710
        }
2711
        if (stream_index == start_index)
2712
            return;
2713
        st = ic->streams[stream_index];
2714
        if (st->codec->codec_type == codec_type) {
2715
            /* check that parameters are OK */
2716
            switch(codec_type) {
2717
            case AVMEDIA_TYPE_AUDIO:
2718
                if (st->codec->sample_rate != 0 &&
2719
                    st->codec->channels != 0)
2720
                    goto the_end;
2721
                break;
2722
            case AVMEDIA_TYPE_VIDEO:
2723
            case AVMEDIA_TYPE_SUBTITLE:
2724
                goto the_end;
2725
            default:
2726
                break;
2727
            }
2728
        }
2729
    }
2730
 the_end:
2731
    stream_component_close(is, start_index);
2732
    stream_component_open(is, stream_index);
2733
}
2734

    
2735

    
2736
static void toggle_full_screen(void)
2737
{
2738
    is_full_screen = !is_full_screen;
2739
    if (!fs_screen_width) {
2740
        /* use default SDL method */
2741
//        SDL_WM_ToggleFullScreen(screen);
2742
    }
2743
    video_open(cur_stream);
2744
}
2745

    
2746
static void toggle_pause(void)
2747
{
2748
    if (cur_stream)
2749
        stream_pause(cur_stream);
2750
    step = 0;
2751
}
2752

    
2753
static void step_to_next_frame(void)
2754
{
2755
    if (cur_stream) {
2756
        /* if the stream is paused unpause it, then step */
2757
        if (cur_stream->paused)
2758
            stream_pause(cur_stream);
2759
    }
2760
    step = 1;
2761
}
2762

    
2763
static void toggle_audio_display(void)
2764
{
2765
    if (cur_stream) {
2766
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2767
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2768
        fill_rectangle(screen,
2769
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2770
                    bgcolor);
2771
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2772
    }
2773
}
2774

    
2775
/* handle an event sent by the GUI */
2776
static void event_loop(void)
2777
{
2778
    SDL_Event event;
2779
    double incr, pos, frac;
2780

    
2781
    for(;;) {
2782
        double x;
2783
        SDL_WaitEvent(&event);
2784
        switch(event.type) {
2785
        case SDL_KEYDOWN:
2786
            if (exit_on_keydown) {
2787
                do_exit();
2788
                break;
2789
            }
2790
            switch(event.key.keysym.sym) {
2791
            case SDLK_ESCAPE:
2792
            case SDLK_q:
2793
                do_exit();
2794
                break;
2795
            case SDLK_f:
2796
                toggle_full_screen();
2797
                break;
2798
            case SDLK_p:
2799
            case SDLK_SPACE:
2800
                toggle_pause();
2801
                break;
2802
            case SDLK_s: //S: Step to next frame
2803
                step_to_next_frame();
2804
                break;
2805
            case SDLK_a:
2806
                if (cur_stream)
2807
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2808
                break;
2809
            case SDLK_v:
2810
                if (cur_stream)
2811
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2812
                break;
2813
            case SDLK_t:
2814
                if (cur_stream)
2815
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2816
                break;
2817
            case SDLK_w:
2818
                toggle_audio_display();
2819
                break;
2820
            case SDLK_LEFT:
2821
                incr = -10.0;
2822
                goto do_seek;
2823
            case SDLK_RIGHT:
2824
                incr = 10.0;
2825
                goto do_seek;
2826
            case SDLK_UP:
2827
                incr = 60.0;
2828
                goto do_seek;
2829
            case SDLK_DOWN:
2830
                incr = -60.0;
2831
            do_seek:
2832
                if (cur_stream) {
2833
                    if (seek_by_bytes) {
2834
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2835
                            pos= cur_stream->video_current_pos;
2836
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2837
                            pos= cur_stream->audio_pkt.pos;
2838
                        }else
2839
                            pos = url_ftell(cur_stream->ic->pb);
2840
                        if (cur_stream->ic->bit_rate)
2841
                            incr *= cur_stream->ic->bit_rate / 8.0;
2842
                        else
2843
                            incr *= 180000.0;
2844
                        pos += incr;
2845
                        stream_seek(cur_stream, pos, incr, 1);
2846
                    } else {
2847
                        pos = get_master_clock(cur_stream);
2848
                        pos += incr;
2849
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2850
                    }
2851
                }
2852
                break;
2853
            default:
2854
                break;
2855
            }
2856
            break;
2857
        case SDL_MOUSEBUTTONDOWN:
2858
            if (exit_on_mousedown) {
2859
                do_exit();
2860
                break;
2861
            }
2862
        case SDL_MOUSEMOTION:
2863
            if(event.type ==SDL_MOUSEBUTTONDOWN){
2864
                x= event.button.x;
2865
            }else{
2866
                if(event.motion.state != SDL_PRESSED)
2867
                    break;
2868
                x= event.motion.x;
2869
            }
2870
            if (cur_stream) {
2871
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2872
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2873
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2874
                }else{
2875
                    int64_t ts;
2876
                    int ns, hh, mm, ss;
2877
                    int tns, thh, tmm, tss;
2878
                    tns = cur_stream->ic->duration/1000000LL;
2879
                    thh = tns/3600;
2880
                    tmm = (tns%3600)/60;
2881
                    tss = (tns%60);
2882
                    frac = x/cur_stream->width;
2883
                    ns = frac*tns;
2884
                    hh = ns/3600;
2885
                    mm = (ns%3600)/60;
2886
                    ss = (ns%60);
2887
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2888
                            hh, mm, ss, thh, tmm, tss);
2889
                    ts = frac*cur_stream->ic->duration;
2890
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2891
                        ts += cur_stream->ic->start_time;
2892
                    stream_seek(cur_stream, ts, 0, 0);
2893
                }
2894
            }
2895
            break;
2896
        case SDL_VIDEORESIZE:
2897
            if (cur_stream) {
2898
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2899
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2900
                screen_width = cur_stream->width = event.resize.w;
2901
                screen_height= cur_stream->height= event.resize.h;
2902
            }
2903
            break;
2904
        case SDL_QUIT:
2905
        case FF_QUIT_EVENT:
2906
            do_exit();
2907
            break;
2908
        case FF_ALLOC_EVENT:
2909
            video_open(event.user.data1);
2910
            alloc_picture(event.user.data1);
2911
            break;
2912
        case FF_REFRESH_EVENT:
2913
            video_refresh_timer(event.user.data1);
2914
            cur_stream->refresh=0;
2915
            break;
2916
        default:
2917
            break;
2918
        }
2919
    }
2920
}
2921

    
2922
static void opt_frame_size(const char *arg)
2923
{
2924
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2925
        fprintf(stderr, "Incorrect frame size\n");
2926
        exit(1);
2927
    }
2928
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2929
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2930
        exit(1);
2931
    }
2932
}
2933

    
2934
static int opt_width(const char *opt, const char *arg)
2935
{
2936
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2937
    return 0;
2938
}
2939

    
2940
static int opt_height(const char *opt, const char *arg)
2941
{
2942
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2943
    return 0;
2944
}
2945

    
2946
static void opt_format(const char *arg)
2947
{
2948
    file_iformat = av_find_input_format(arg);
2949
    if (!file_iformat) {
2950
        fprintf(stderr, "Unknown input format: %s\n", arg);
2951
        exit(1);
2952
    }
2953
}
2954

    
2955
static void opt_frame_pix_fmt(const char *arg)
2956
{
2957
    frame_pix_fmt = av_get_pix_fmt(arg);
2958
}
2959

    
2960
static int opt_sync(const char *opt, const char *arg)
2961
{
2962
    if (!strcmp(arg, "audio"))
2963
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2964
    else if (!strcmp(arg, "video"))
2965
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2966
    else if (!strcmp(arg, "ext"))
2967
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2968
    else {
2969
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2970
        exit(1);
2971
    }
2972
    return 0;
2973
}
2974

    
2975
static int opt_seek(const char *opt, const char *arg)
2976
{
2977
    start_time = parse_time_or_die(opt, arg, 1);
2978
    return 0;
2979
}
2980

    
2981
static int opt_duration(const char *opt, const char *arg)
2982
{
2983
    duration = parse_time_or_die(opt, arg, 1);
2984
    return 0;
2985
}
2986

    
2987
static int opt_debug(const char *opt, const char *arg)
2988
{
2989
    av_log_set_level(99);
2990
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2991
    return 0;
2992
}
2993

    
2994
static int opt_vismv(const char *opt, const char *arg)
2995
{
2996
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2997
    return 0;
2998
}
2999

    
3000
static int opt_thread_count(const char *opt, const char *arg)
3001
{
3002
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
3003
#if !HAVE_THREADS
3004
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
3005
#endif
3006
    return 0;
3007
}
3008

    
3009
static const OptionDef options[] = {
3010
#include "cmdutils_common_opts.h"
3011
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
3012
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
3013
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
3014
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
3015
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
3016
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
3017
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
3018
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
3019
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
3020
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
3021
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
3022
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
3023
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
3024
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
3025
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
3026
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
3027
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
3028
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
3029
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
3030
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
3031
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
3032
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3033
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
3034
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
3035
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
3036
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
3037
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
3038
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
3039
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
3040
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
3041
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
3042
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
3043
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
3044
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
3045
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
3046
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
3047
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
3048
#if CONFIG_AVFILTER
3049
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
3050
#endif
3051
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
3052
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
3053
    { NULL, },
3054
};
3055

    
3056
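
/* The table above is consumed by parse_options() and show_help_options() from
 * cmdutils.h: entries flagged OPT_EXPERT are only listed under "Advanced
 * options" in show_help() below, and OPT_FUNC2 entries call the given function
 * instead of storing into a variable.  Adding a new boolean option would look
 * roughly like this (illustrative sketch only; "example" and example_flag do
 * not exist in this file):
 *
 *   { "example", OPT_BOOL | OPT_EXPERT, {(void*)&example_flag}, "describe it", "" },
 */
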
static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

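/* Typical invocations, using options from the table above (illustrative):
 *   ffplay input.mkv                       play a file with default settings
 *   ffplay -ss 60 -t 10 -autoexit in.mkv   seek to 1:00, play 10 seconds, exit
 *   ffplay -nodisp -stats in.mp3           audio only, with a status line
 */
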
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (input_filename) {
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
                filename, input_filename);
        exit(1);
    }
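    /* "-" selects the pipe: protocol, i.e. the input is read from stdin */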
    if (!strcmp(filename, "-"))
3105
        filename = "pipe:";
3106
    input_filename = filename;
3107
}
3108

    
3109
/* program entry point */
int main(int argc, char **argv)
{
    int flags;

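    /* collapse consecutive identical log messages into a single
       "Last message repeated N times" line */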
    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

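    /* remember the desktop resolution so full-screen display (toggled with 'f',
       see show_help above) can use the whole screen */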
    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

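    /* tell SDL not to queue event types that the event loop never handles */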
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

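    /* the flush packet is a sentinel: on a seek it is queued into the packet
       queues, and the decoder threads recognize it by its data pointer and
       flush the codec buffers before decoding post-seek packets */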
    av_init_packet(&flush_pkt);
3164
    flush_pkt.data= "FLUSH";
3165

    
3166
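    /* open the stream: this spawns the demuxing and decoding threads;
       playback is then driven from the event loop below */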
    cur_stream = stream_open(input_filename, file_iformat);

    /* event_loop() never returns; the process terminates via exit() from
       within the loop */
    event_loop();

    return 0;
}