Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ c1d4bb57

History | View | Annotate | Download (98.2 KB)

1
/*
2
 * FFplay : Simple Media Player based on the FFmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#define _XOPEN_SOURCE 600
23

    
24
#include "config.h"
25
#include <inttypes.h>
26
#include <math.h>
27
#include <limits.h>
28
#include "libavutil/avstring.h"
29
#include "libavutil/colorspace.h"
30
#include "libavutil/pixdesc.h"
31
#include "libavutil/imgutils.h"
32
#include "libavutil/parseutils.h"
33
#include "libavutil/samplefmt.h"
34
#include "libavutil/avassert.h"
35
#include "libavformat/avformat.h"
36
#include "libavdevice/avdevice.h"
37
#include "libswscale/swscale.h"
38
#include "libavcodec/audioconvert.h"
39
#include "libavcodec/opt.h"
40
#include "libavcodec/avfft.h"
41

    
42
#if CONFIG_AVFILTER
43
# include "libavfilter/avfilter.h"
44
# include "libavfilter/avfiltergraph.h"
45
#endif
46

    
47
#include "cmdutils.h"
48

    
49
#include <SDL.h>
50
#include <SDL_thread.h>
51

    
52
#ifdef __MINGW32__
53
#undef main /* We don't want SDL to override our main() */
54
#endif
55

    
56
#include <unistd.h>
57
#include <assert.h>
58

    
59
const char program_name[] = "FFplay";
60
const int program_birth_year = 2003;
61

    
62
//#define DEBUG
63
//#define DEBUG_SYNC
64

    
65
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
66
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
67
#define MIN_FRAMES 5
68

    
69
/* SDL audio buffer size, in samples. Should be small to have precise
70
   A/V sync as SDL does not have hardware buffer fullness info. */
71
#define SDL_AUDIO_BUFFER_SIZE 1024
72

    
73
/* no AV sync correction is done if below the AV sync threshold */
74
#define AV_SYNC_THRESHOLD 0.01
75
/* no AV correction is done if too big error */
76
#define AV_NOSYNC_THRESHOLD 10.0
77

    
78
#define FRAME_SKIP_FACTOR 0.05
79

    
80
/* maximum audio speed change to get correct sync */
81
#define SAMPLE_CORRECTION_PERCENT_MAX 10
82

    
83
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
84
#define AUDIO_DIFF_AVG_NB   20
85

    
86
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
87
#define SAMPLE_ARRAY_SIZE (2*65536)
88

    
89
static int sws_flags = SWS_BICUBIC;
90

    
91
/* Thread-safe FIFO of demuxed packets, shared between the read thread
 * (producer) and a decoder thread (consumer). */
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt; /* head / tail of the singly linked packet list */
    int nb_packets;      /* number of packets currently queued */
    int size;            /* sum of packet payload sizes plus list-node overhead, in bytes */
    int abort_request;   /* when set, blocked consumers wake up and packet_queue_get() returns -1 */
    SDL_mutex *mutex;    /* protects every field above */
    SDL_cond *cond;      /* signaled when a packet is appended or abort is requested */
} PacketQueue;
99

    
100
#define VIDEO_PICTURE_QUEUE_SIZE 2
101
#define SUBPICTURE_QUEUE_SIZE 4
102

    
103
/* One entry of the decoded-picture queue (VideoState.pictq). */
typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;                            ///<SDL YUV overlay holding the pixels (NULL until allocated)
    int width, height; /* source height & width */
    int allocated;                               ///<presumably set once bmp allocation completed -- confirm in the alloc event handler
    enum PixelFormat pix_fmt;                    ///<pixel format of the decoded source picture

#if CONFIG_AVFILTER
    AVFilterBufferRef *picref;                   ///<filtered-frame reference (owns the picture data when filtering)
#endif
} VideoPicture;
116

    
117
/* One entry of the subtitle queue (VideoState.subpq). */
typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub; /* decoded subtitle; its rects are blended onto video frames */
} SubPicture;
121

    
122
/* Master-clock selection: which clock the other streams are slaved to. */
enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};
127

    
128
/*
 * All state for one open media file: worker thread handles, per-stream
 * decode state, packet queues, the decoded picture/subtitle queues, and
 * the clocks used for A/V synchronisation.
 */
typedef struct VideoState {
    /* worker threads */
    SDL_Thread *parse_tid;      /* demux/read thread */
    SDL_Thread *video_tid;      /* video decode thread */
    SDL_Thread *refresh_tid;    /* thread posting FF_REFRESH_EVENTs (see refresh_thread) */
    AVInputFormat *iformat;     /* forced input format, if any */
    int no_background;
    int abort_request;          /* set to tear everything down */
    int paused;
    int last_paused;
    /* pending seek request (consumed by the read thread) */
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;

    int audio_stream;           /* audio stream index */

    int av_sync_type;           /* one of the AV_SYNC_* enum values */
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    /* audio decode / output state */
    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum AVSampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    /* audio visualisation (waveform / RDFT spectrum) */
    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext *rdft;
    int rdft_bits;
    FFTSample *rdft_data;
    int xpos;

    /* subtitle state */
    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    /* video state */
    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
#if !CONFIG_AVFILTER
    struct SwsContext *img_convert_ctx;
#endif

    //    QETimer *video_timer;
    char filename[1024];
    /* display window geometry */
    int width, height, xleft, ytop;

#if CONFIG_AVFILTER
    AVFilterContext *out_video_filter;          ///<the last filter in the video chain
#endif

    float skip_frames;
    float skip_frames_index;
    int refresh;
} VideoState;
219

    
220
static void show_help(void);
221
static int audio_write_get_buf_size(VideoState *is);
222

    
223
/* options specified by the user */
224
static AVInputFormat *file_iformat;
225
static const char *input_filename;
226
static const char *window_title;
227
static int fs_screen_width;
228
static int fs_screen_height;
229
static int screen_width = 0;
230
static int screen_height = 0;
231
static int frame_width = 0;
232
static int frame_height = 0;
233
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234
static int audio_disable;
235
static int video_disable;
236
static int wanted_stream[AVMEDIA_TYPE_NB]={
237
    [AVMEDIA_TYPE_AUDIO]=-1,
238
    [AVMEDIA_TYPE_VIDEO]=-1,
239
    [AVMEDIA_TYPE_SUBTITLE]=-1,
240
};
241
static int seek_by_bytes=-1;
242
static int display_disable;
243
static int show_status = 1;
244
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245
static int64_t start_time = AV_NOPTS_VALUE;
246
static int64_t duration = AV_NOPTS_VALUE;
247
static int debug = 0;
248
static int debug_mv = 0;
249
static int step = 0;
250
static int thread_count = 1;
251
static int workaround_bugs = 1;
252
static int fast = 0;
253
static int genpts = 0;
254
static int lowres = 0;
255
static int idct = FF_IDCT_AUTO;
256
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259
static int error_recognition = FF_ER_CAREFUL;
260
static int error_concealment = 3;
261
static int decoder_reorder_pts= -1;
262
static int autoexit;
263
static int exit_on_keydown;
264
static int exit_on_mousedown;
265
static int loop=1;
266
static int framedrop=1;
267

    
268
static int rdftspeed=20;
269
#if CONFIG_AVFILTER
270
static char *vfilters = NULL;
271
#endif
272

    
273
/* current context */
274
static int is_full_screen;
275
static VideoState *cur_stream;
276
static int64_t audio_callback_time;
277

    
278
static AVPacket flush_pkt;
279

    
280
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
281
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
282
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
283

    
284
static SDL_Surface *screen;
285

    
286
/* Append a packet to the queue.  The packet payload is duplicated so the
 * queue owns it, except for the sentinel flush_pkt which is stored as-is.
 * Returns 0 on success, -1 on duplication or allocation failure. */
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *entry;

    /* duplicate the packet */
    if (pkt != &flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    entry = av_malloc(sizeof(AVPacketList));
    if (!entry)
        return -1;
    entry->pkt  = *pkt;
    entry->next = NULL;

    SDL_LockMutex(q->mutex);

    /* link at the tail */
    if (q->last_pkt)
        q->last_pkt->next = entry;
    else
        q->first_pkt = entry;
    q->last_pkt = entry;

    q->nb_packets++;
    q->size += entry->pkt.size + sizeof(*entry);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}
317

    
318
/* packet queue handling */
319
static void packet_queue_init(PacketQueue *q)
320
{
321
    memset(q, 0, sizeof(PacketQueue));
322
    q->mutex = SDL_CreateMutex();
323
    q->cond = SDL_CreateCond();
324
    packet_queue_put(q, &flush_pkt);
325
}
326

    
327
/* Drop every queued packet, freeing payloads and list nodes, and reset
 * the counters.  Safe to call while consumers are running. */
static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *cur, *next;

    SDL_LockMutex(q->mutex);
    cur = q->first_pkt;
    while (cur) {
        next = cur->next;
        av_free_packet(&cur->pkt);
        av_freep(&cur);
        cur = next;
    }
    q->last_pkt   = NULL;
    q->first_pkt  = NULL;
    q->nb_packets = 0;
    q->size       = 0;
    SDL_UnlockMutex(q->mutex);
}
343

    
344
/* Final teardown of a queue: drop all packets, then destroy the mutex
 * and condition variable.  The queue must not be used afterwards. */
static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}
350

    
351
/* Request abort: set the flag and wake any consumer blocked in
 * packet_queue_get(), which will then return -1. */
static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}
361

    
362
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
363
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
364
{
365
    AVPacketList *pkt1;
366
    int ret;
367

    
368
    SDL_LockMutex(q->mutex);
369

    
370
    for(;;) {
371
        if (q->abort_request) {
372
            ret = -1;
373
            break;
374
        }
375

    
376
        pkt1 = q->first_pkt;
377
        if (pkt1) {
378
            q->first_pkt = pkt1->next;
379
            if (!q->first_pkt)
380
                q->last_pkt = NULL;
381
            q->nb_packets--;
382
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
383
            *pkt = pkt1->pkt;
384
            av_free(pkt1);
385
            ret = 1;
386
            break;
387
        } else if (!block) {
388
            ret = 0;
389
            break;
390
        } else {
391
            SDL_CondWait(q->cond, q->mutex);
392
        }
393
    }
394
    SDL_UnlockMutex(q->mutex);
395
    return ret;
396
}
397

    
398
/* Fill a w x h rectangle at (x, y) on the given surface with a solid color. */
static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect area;

    area.x = x;
    area.y = y;
    area.w = w;
    area.h = h;
    SDL_FillRect(screen, &area, color);
}
408

    
409
#if 0
410
/* draw only the border of a rectangle */
411
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
412
{
413
    int w1, w2, h1, h2;
414

415
    /* fill the background */
416
    w1 = x;
417
    if (w1 < 0)
418
        w1 = 0;
419
    w2 = s->width - (x + w);
420
    if (w2 < 0)
421
        w2 = 0;
422
    h1 = y;
423
    if (h1 < 0)
424
        h1 = 0;
425
    h2 = s->height - (y + h);
426
    if (h2 < 0)
427
        h2 = 0;
428
    fill_rectangle(screen,
429
                   s->xleft, s->ytop,
430
                   w1, s->height,
431
                   color);
432
    fill_rectangle(screen,
433
                   s->xleft + s->width - w2, s->ytop,
434
                   w2, s->height,
435
                   color);
436
    fill_rectangle(screen,
437
                   s->xleft + w1, s->ytop,
438
                   s->width - w1 - w2, h1,
439
                   color);
440
    fill_rectangle(screen,
441
                   s->xleft + w1, s->ytop + s->height - h2,
442
                   s->width - w1 - w2, h2,
443
                   color);
444
}
445
#endif
446

    
447
/* Alpha-blend newp over oldp with alpha a (0..255); s is a scale shift
 * used when newp is a sum of 2 or 4 chroma samples (see blend_subrect). */
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

/* Unpack a 32-bit ARGB word at s into r, g, b, a. */
#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

/* Look up the palette entry for the 8-bit index at s and unpack it
 * as A, Y, U, V components. */
#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

/* Pack y, u, v, a back into a 32-bit word at d. */
#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


/* bytes per pixel of the palettised subtitle bitmap */
#define BPP 1
475

    
476
/* Alpha-blend a palettised subtitle rectangle onto a planar YUV 4:2:0
 * destination picture of size imgw x imgh.  The rectangle is clipped to
 * the image first.  Luma is blended per pixel; chroma is blended on the
 * half-resolution planes by accumulating the 2 or 4 covered subtitle
 * samples into u1/v1/a1 (the `s' argument of ALPHA_BLEND compensates
 * for the accumulated sums).  Odd x/y offsets and odd width/height get
 * special-cased partial rows/columns.
 *
 * Bug fixed: the odd-height tail loop blended chroma with the last
 * sample's u/v instead of the accumulated u1/v1 sums, unlike the two
 * parallel loops above it, tinting the bottom row of odd-height rects. */
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    /* clip the rectangle against the image borders */
    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    /* leading odd row: its chroma line is shared with the row above */
    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* main loop: two luma rows per chroma row */
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            /* 4-sample chroma average (scale shift 2) */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            /* FIX: blend the accumulated 2-sample sums u1/v1 (matching
               the loops above), not the last sample's u/v */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}
675

    
676
/* Release the decoded subtitle owned by a SubPicture queue entry. */
static void free_subpicture(SubPicture *sp)
{
    avsubtitle_free(&sp->sub);
}
680

    
681
/* Display the picture at the read index of the picture queue: compute
 * the display aspect ratio, blend any pending subtitle rects into the
 * overlay, letterbox the result inside the window and show it. */
static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
#if CONFIG_AVFILTER
         /* sample (pixel) aspect ratio comes from the filtered frame */
         if (vp->picref->video->pixel_aspect.num == 0)
             aspect_ratio = 0;
         else
             aspect_ratio = av_q2d(vp->picref->video->pixel_aspect);
#else

        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
#endif
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        /* convert sample aspect ratio to display aspect ratio */
        aspect_ratio *= (float)vp->width / (float)vp->height;

        if (is->subtitle_st) {
            if (is->subpq_size > 0) {
                sp = &is->subpq[is->subpq_rindex];

                /* only blend once the subtitle's display time has come */
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                    SDL_LockYUVOverlay (vp->bmp);

                    /* note the deliberate plane swap: indices 1 and 2 of
                       the overlay map to data[2]/data[1] of the AVPicture
                       (U/V order differs between the two layouts) */
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        /* letterbox: largest even-width rect with the right aspect ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}
765

    
766
/* Modulo that biases negative inputs upward by b.  NOTE: for a < 0 that
 * is an exact multiple of b this intentionally yields b, not 0
 * (e.g. compute_mod(-3, 3) == 3), matching how the callers index the
 * circular sample array. */
static inline int compute_mod(int a, int b)
{
    int r = a % b;
    return a < 0 ? r + b : r;
}
770

    
771
/* Draw the audio visualisation: either a per-channel waveform
 * (show_audio == 1) or an RDFT spectrogram column (otherwise).
 * The display index is centred on the samples currently being output,
 * estimated from the hardware buffer fill and the time elapsed since
 * the last audio callback.
 *
 * Bug fixed: time_diff was declared int16_t, but it holds an
 * av_gettime() difference in microseconds (int64_t); anything over
 * ~33ms overflowed and corrupted the delay estimate. */
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;   /* microseconds since the last audio callback */
    int rdft_bits, nb_freq;

    /* smallest power of two >= 2*height, for the RDFT size */
    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay += 2*data_used;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            /* look back for a good trigger point (zero crossing with a
               large positive slope) to stabilise the waveform */
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        /* separator lines between channels */
        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            /* window size changed: rebuild the RDFT context and buffer */
            av_rdft_end(s->rdft);
            av_free(s->rdft_data);
            s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
            s->rdft_bits= rdft_bits;
            s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
        }
        {
            FFTSample *data[2];
            for(ch = 0;ch < nb_display_channels; ch++) {
                data[ch] = s->rdft_data + 2*nb_freq*ch;
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    /* apply a parabolic (Welch) window */
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                av_rdft_calc(s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but its more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
                       + data[1][2*y+1]*data[1][2*y+1])) : a;
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        /* the spectrogram scrolls one column per refresh */
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}
909

    
910
/* (Re)open the SDL video surface.  The window size is taken, in order
 * of preference, from the forced fullscreen/window size options, the
 * output filter (or codec) dimensions, or a 640x480 fallback.  A no-op
 * if the current surface already has the requested size.
 * Returns 0 on success, -1 if SDL_SetVideoMode fails. */
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
#if CONFIG_AVFILTER
    }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
        w = is->out_video_filter->inputs[0]->w;
        h = is->out_video_filter->inputs[0]->h;
#else
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
#endif
    } else {
        w = 640;
        h = 480;
    }
    /* nothing to do if the surface already matches */
    if(screen && is->width == screen->w && screen->w == w
       && is->height== screen->h && screen->h == h)
        return 0;

#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    if (!window_title)
        window_title = input_filename;
    SDL_WM_SetCaption(window_title, window_title);

    /* remember the size actually granted by SDL */
    is->width = screen->w;
    is->height = screen->h;

    return 0;
}
959

    
960
/* display the current picture, if any */
961
static void video_display(VideoState *is)
962
{
963
    if(!screen)
964
        video_open(cur_stream);
965
    if (is->audio_st && is->show_audio)
966
        video_audio_display(is);
967
    else if (is->video_st)
968
        video_image_display(is);
969
}
970

    
971
static int refresh_thread(void *opaque)
972
{
973
    VideoState *is= opaque;
974
    while(!is->abort_request){
975
        SDL_Event event;
976
        event.type = FF_REFRESH_EVENT;
977
        event.user.data1 = opaque;
978
        if(!is->refresh){
979
            is->refresh=1;
980
            SDL_PushEvent(&event);
981
        }
982
        usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDLs event passing is so slow it would be silly
983
    }
984
    return 0;
985
}
986

    
987
/* get the current audio clock value */
988
static double get_audio_clock(VideoState *is)
989
{
990
    double pts;
991
    int hw_buf_size, bytes_per_sec;
992
    pts = is->audio_clock;
993
    hw_buf_size = audio_write_get_buf_size(is);
994
    bytes_per_sec = 0;
995
    if (is->audio_st) {
996
        bytes_per_sec = is->audio_st->codec->sample_rate *
997
            2 * is->audio_st->codec->channels;
998
    }
999
    if (bytes_per_sec)
1000
        pts -= (double)hw_buf_size / bytes_per_sec;
1001
    return pts;
1002
}
1003

    
1004
/* get the current video clock value */
1005
static double get_video_clock(VideoState *is)
1006
{
1007
    if (is->paused) {
1008
        return is->video_current_pts;
1009
    } else {
1010
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
1011
    }
1012
}
1013

    
1014
/* get the current external clock value */
1015
static double get_external_clock(VideoState *is)
1016
{
1017
    int64_t ti;
1018
    ti = av_gettime();
1019
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1020
}
1021

    
1022
/* get the current master clock value */
1023
static double get_master_clock(VideoState *is)
1024
{
1025
    double val;
1026

    
1027
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1028
        if (is->video_st)
1029
            val = get_video_clock(is);
1030
        else
1031
            val = get_audio_clock(is);
1032
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1033
        if (is->audio_st)
1034
            val = get_audio_clock(is);
1035
        else
1036
            val = get_video_clock(is);
1037
    } else {
1038
        val = get_external_clock(is);
1039
    }
1040
    return val;
1041
}
1042

    
1043
/* seek in the stream */
1044
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1045
{
1046
    if (!is->seek_req) {
1047
        is->seek_pos = pos;
1048
        is->seek_rel = rel;
1049
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1050
        if (seek_by_bytes)
1051
            is->seek_flags |= AVSEEK_FLAG_BYTE;
1052
        is->seek_req = 1;
1053
    }
1054
}
1055

    
1056
/* Toggle pause/resume.  On resume, re-anchor the frame timer and the
 * video clock so that playback continues without a jump for the time
 * spent paused. */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        /* resuming: advance frame_timer by the wall-clock time spent
           paused so frame scheduling stays monotonic */
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        /* read_pause_return != ENOSYS means av_read_pause() actually
           paused the demuxer, so the current pts must be re-derived
           from "now" rather than kept frozen */
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        /* re-anchor the drift so get_video_clock() resumes from here */
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}
1068

    
1069
/* Compute the absolute time (seconds, same scale as frame_timer) at
 * which the frame with pts frame_current_pts should be displayed, and
 * advance is->frame_timer by the (possibly sync-adjusted) frame delay.
 * When video is slave to the audio or external clock, the nominal delay
 * is dropped to 0 or doubled to correct drift beyond the threshold. */
static double compute_target_time(double frame_current_pts, VideoState *is)
{
    double delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }
    is->frame_timer += delay;
#if defined(DEBUG_SYNC)
    /* fix: the previous trace referenced the undefined variable
       actual_delay (and possibly-uninitialized diff) and did not compile
       when DEBUG_SYNC was enabled */
    printf("video: delay=%0.3f pts=%0.3f\n", delay, frame_current_pts);
#endif

    return is->frame_timer;
}
1109

    
1110
/* called to display each frame.
 * Driven by FF_REFRESH_EVENT from refresh_thread(): pops due pictures
 * from the picture queue (dropping late ones when -framedrop is set),
 * expires/advances subtitles, blits the frame, and periodically prints
 * the status line. */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
retry:
        if (is->pictq_size == 0) {
            //nothing to do, no picture to display in the que
        } else {
            double time= av_gettime()/1000000.0;
            double next_target;
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* not yet due: keep it queued and wait for the next refresh */
            if(time < vp->target_clock)
                return;
            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - time;
            is->video_current_pos = vp->pos;
            /* estimate when the following picture is due, to decide on
               dropping this one */
            if(is->pictq_size > 1){
                VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
                assert(nextvp->target_clock >= vp->target_clock);
                next_target= nextvp->target_clock;
            }else{
                next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
            }
            if(framedrop && time > next_target){
                /* running late: raise the decoder-side skip ratio, and
                   drop this picture entirely when another one is ready
                   or we are more than half a second behind */
                is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
                if(is->pictq_size > 1 || time > next_target + 0.5){
                    /* update queue size and signal for next picture */
                    if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                        is->pictq_rindex = 0;

                    SDL_LockMutex(is->pictq_mutex);
                    is->pictq_size--;
                    SDL_CondSignal(is->pictq_cond);
                    SDL_UnlockMutex(is->pictq_mutex);
                    goto retry;
                }
            }

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    /* subtitle stream switched: flush the whole queue */
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        /* drop the head subpicture when it has expired or
                           the next one is already due to start */
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            if (!display_disable)
                video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        if (!display_disable)
            video_display(is);
    }
    if (show_status) {
        /* print the status line at most every 30ms */
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
                   get_master_clock(is),
                   av_diff,
                   FFMAX(is->skip_frames-1, 0),
                   aqsize / 1024,
                   vqsize / 1024,
                   sqsize,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
                   is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}
1257

    
1258
/* Stop the background threads and free all per-stream resources,
 * including the VideoState itself.  Joins the parse and refresh threads,
 * so it must not be called from either of them. */
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);
    SDL_WaitThread(is->refresh_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
#if CONFIG_AVFILTER
        if (vp->picref) {
            avfilter_unref_buffer(vp->picref);
            vp->picref = NULL;
        }
#endif
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
#if !CONFIG_AVFILTER
    /* without filters the scaler context is owned here */
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
#endif
    av_free(is);
}
1291

    
1292
/* Global shutdown: close the current stream, release option/filter
 * state, shut down SDL and terminate the process.  Never returns. */
static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    uninit_opts();
#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    if (show_status)
        printf("\n");
    SDL_Quit();
    /* emits an empty message at QUIET level — presumably to terminate a
       partial log line before exit; confirm against av_log semantics */
    av_log(NULL, AV_LOG_QUIET, "");
    exit(0);
}
1308

    
1309
/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems).
   Triggered via FF_ALLOC_EVENT from queue_picture(): (re)creates the SDL
   YUV overlay for the current write slot and signals the waiting decoder
   thread through pictq_cond. */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    /* drop any previous overlay before creating one with the new size */
    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if CONFIG_AVFILTER
    if (vp->picref)
        avfilter_unref_buffer(vp->picref);
    vp->picref = NULL;

    /* dimensions/format come from the filter graph output */
    vp->width   = is->out_video_filter->inputs[0]->w;
    vp->height  = is->out_video_filter->inputs[0]->h;
    vp->pix_fmt = is->out_video_filter->inputs[0]->format;
#else
    vp->width   = is->video_st->codec->width;
    vp->height  = is->video_st->codec->height;
    vp->pix_fmt = is->video_st->codec->pix_fmt;
#endif

    vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
        /* SDL allocates a buffer smaller than requested if the video
         * overlay hardware is unable to support the requested size. */
        fprintf(stderr, "Error: the video system does not support an image\n"
                        "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
                        "to reduce the image size.\n", vp->width, vp->height );
        do_exit();
    }

    /* wake up queue_picture(), which blocks on vp->allocated */
    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}
1352

    
1353
/**
 * Queue a decoded frame for display: wait for a free queue slot, ask the
 * main thread to (re)allocate the SDL overlay when size changed, copy or
 * convert the frame into the overlay, and stamp its display time.
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 * @return 0 on success, -1 if the video queue was aborted
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
#if CONFIG_AVFILTER
    AVPicture pict_src;
#endif
    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);

    /* queue full while no refresh is pending: decoding is running ahead
       of display, so decay the frame-skip ratio */
    if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
        is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));

    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
#if CONFIG_AVFILTER
        vp->width  != is->out_video_filter->inputs[0]->w ||
        vp->height != is->out_video_filter->inputs[0]->h) {
#else
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
#endif
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;
#if CONFIG_AVFILTER
        if(vp->picref)
            avfilter_unref_buffer(vp->picref);
        /* take ownership of the filter buffer carried in opaque */
        vp->picref = src_frame->opaque;
#endif

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        memset(&pict,0,sizeof(AVPicture));
        /* SDL YV12 stores the V plane before U, hence planes 1/2 swapped */
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];

#if CONFIG_AVFILTER
        pict_src.data[0] = src_frame->data[0];
        pict_src.data[1] = src_frame->data[1];
        pict_src.data[2] = src_frame->data[2];

        pict_src.linesize[0] = src_frame->linesize[0];
        pict_src.linesize[1] = src_frame->linesize[1];
        pict_src.linesize[2] = src_frame->linesize[2];

        //FIXME use direct rendering
        av_picture_copy(&pict, &pict_src,
                        vp->pix_fmt, vp->width, vp->height);
#else
        /* without the filter graph, convert to YUV420P via swscale */
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
            PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, vp->height, pict.data, pict.linesize);
#endif
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        /* schedule the display time before publishing the picture */
        vp->target_clock= compute_target_time(vp->pts, is);

        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}
1472

    
1473
/**
1474
 * compute the exact PTS for the picture if it is omitted in the stream
1475
 * @param pts1 the dts of the pkt / pts of the frame
1476
 */
1477
static int output_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1478
{
1479
    double frame_delay, pts;
1480

    
1481
    pts = pts1;
1482

    
1483
    if (pts != 0) {
1484
        /* update video clock with pts, if present */
1485
        is->video_clock = pts;
1486
    } else {
1487
        pts = is->video_clock;
1488
    }
1489
    /* update video clock for next frame */
1490
    frame_delay = av_q2d(is->video_st->codec->time_base);
1491
    /* for MPEG2, the frame can be repeated, so we update the
1492
       clock accordingly */
1493
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1494
    is->video_clock += frame_delay;
1495

    
1496
#if defined(DEBUG_SYNC) && 0
1497
    printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1498
           av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1499
#endif
1500
    return queue_picture(is, src_frame, pts, pos);
1501
}
1502

    
1503
/**
 * Fetch one packet from the video queue and decode it.
 *
 * Returns 1 when a displayable frame was produced (*pts set in stream
 * time_base units, 0 if unknown), 0 when no frame resulted (flush
 * packet, decoder delay, or frame skipped for catch-up), -1 when the
 * queue was aborted.
 */
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
    int len1, got_picture, i;

    if (packet_queue_get(&is->videoq, pkt, 1) < 0)
        return -1;

    /* a flush packet marks a seek: reset decoder and timing state */
    if (pkt->data == flush_pkt.data) {
        avcodec_flush_buffers(is->video_st->codec);

        SDL_LockMutex(is->pictq_mutex);
        //Make sure there are no long delay timers (ideally we should just flush the que but thats harder)
        for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
            is->pictq[i].target_clock= 0;
        }
        /* wait for the refresh thread to drain the queued pictures */
        while (is->pictq_size && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        is->video_current_pos = -1;
        SDL_UnlockMutex(is->pictq_mutex);

        /* NOTE(review): compute_target_time() treats frame_last_pts as a
           double pts; storing the int64 AV_NOPTS_VALUE sentinel here
           looks questionable — confirm the intended semantics */
        is->frame_last_pts = AV_NOPTS_VALUE;
        is->frame_last_delay = 0;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->skip_frames = 1;
        is->skip_frames_index = 0;
        return 0;
    }

    /* len1 (bytes consumed) is currently unused */
    len1 = avcodec_decode_video2(is->video_st->codec,
                                 frame, &got_picture,
                                 pkt);

    if (got_picture) {
        /* timestamp source: -1 = best-effort estimate, >0 = frame pts,
           0 = packet dts (decoder_reorder_pts option) */
        if (decoder_reorder_pts == -1) {
            *pts = frame->best_effort_timestamp;
        } else if (decoder_reorder_pts) {
            *pts = frame->pkt_pts;
        } else {
            *pts = frame->pkt_dts;
        }

        if (*pts == AV_NOPTS_VALUE) {
            *pts = 0;
        }

        /* frame skipping: only report every skip_frames-th frame */
        is->skip_frames_index += 1;
        if(is->skip_frames_index >= is->skip_frames){
            is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
            return 1;
        }

    }
    return 0;
}
1558

    
1559
#if CONFIG_AVFILTER
1560
/* private context of the ffplay_input source filter */
typedef struct {
    VideoState *is;   // owning player state (supplies the video codec/stream)
    AVFrame *frame;   // scratch frame reused for each decode
    int use_dr1;      // nonzero when frames are decoded directly into filter buffers
} FilterPriv;
1565

    
1566
/* get_buffer() callback installed on the decoder by input_init(): hands
 * the codec a buffer obtained from the filter graph so decoded frames
 * can flow into the graph without a copy (direct rendering).
 * Returns 0 on success, -1 on invalid size or allocation failure. */
static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterContext *ctx = codec->opaque;
    AVFilterBufferRef  *ref;
    int perms = AV_PERM_WRITE;
    int i, w, h, stride[4];
    unsigned edge;
    int pixel_size;

    av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

    if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
        perms |= AV_PERM_NEG_LINESIZES;

    /* translate the codec's buffer hints into filter buffer permissions */
    if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
        if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
        if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
        if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
    }
    if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

    w = codec->width;
    h = codec->height;

    if(av_image_check_size(w, h, 0, codec))
        return -1;

    /* pad dimensions for alignment, plus an edge border on every side */
    avcodec_align_dimensions2(codec, &w, &h, stride);
    edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
    w += edge << 1;
    h += edge << 1;

    if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
        return -1;

    pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
    /* the ref reports the visible size, not the padded one */
    ref->video->w = codec->width;
    ref->video->h = codec->height;
    for(i = 0; i < 4; i ++) {
        unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
        unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;

        /* advance each plane pointer past the edge border (chroma planes
           use subsampled offsets) */
        if (ref->data[i]) {
            ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
        }
        pic->data[i]     = ref->data[i];
        pic->linesize[i] = ref->linesize[i];
    }
    /* stash the buffer ref so release/reget callbacks can find it */
    pic->opaque = ref;
    pic->age    = INT_MAX;
    pic->type   = FF_BUFFER_TYPE_USER;
    pic->reordered_opaque = codec->reordered_opaque;
    if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
    else           pic->pkt_pts = AV_NOPTS_VALUE;
    return 0;
}
1622

    
1623
/* release_buffer() callback: drop the filter buffer backing this frame
 * and clear the data pointers so the codec cannot touch freed planes. */
static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    memset(pic->data, 0, sizeof(pic->data));
    avfilter_unref_buffer(ref);
}
1628

    
1629
/* reget_buffer() callback: reuse the existing filter buffer when the
 * frame geometry/format is unchanged, otherwise fail; with no buffer at
 * all, fall back to a fresh readable get_buffer(). */
static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
    AVFilterBufferRef *ref = pic->opaque;

    if (!pic->data[0]) {
        pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
        return codec->get_buffer(codec, pic);
    }

    if (codec->width != ref->video->w || codec->height != ref->video->h ||
        codec->pix_fmt != ref->format) {
        av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
        return -1;
    }

    pic->reordered_opaque = codec->reordered_opaque;
    pic->pkt_pts = codec->pkt ? codec->pkt->pts : AV_NOPTS_VALUE;
    return 0;
}
1649

    
1650
/* init callback of the ffplay_input filter: bind the filter to the
 * player's video decoder, enable direct rendering into filter buffers
 * when the codec supports DR1, and allocate the scratch frame.
 * Returns 0 on success, -1 on missing opaque or allocation failure. */
static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
    FilterPriv *priv = ctx->priv;
    AVCodecContext *codec;
    if(!opaque) return -1;

    priv->is = opaque;
    codec    = priv->is->video_st->codec;
    codec->opaque = ctx;
    if((codec->codec->capabilities & CODEC_CAP_DR1)
    ) {
        codec->flags |= CODEC_FLAG_EMU_EDGE;
        priv->use_dr1 = 1;
        codec->get_buffer     = input_get_buffer;
        codec->release_buffer = input_release_buffer;
        codec->reget_buffer   = input_reget_buffer;
        codec->thread_safe_callbacks = 1;
    }

    priv->frame = avcodec_alloc_frame();
    /* fix: the allocation result was previously unchecked; a failure here
       would crash later in input_request_frame() */
    if (!priv->frame)
        return -1;

    return 0;
}
1673

    
1674
/* uninit callback: release the scratch frame allocated in input_init() */
static void input_uninit(AVFilterContext *ctx)
{
    FilterPriv *fp = ctx->priv;

    av_free(fp->frame);
}
1679

    
1680
/* request_frame() of the source's output pad: decode until a frame is
 * available, wrap it in a buffer ref, and push it through the link as a
 * single full-height slice.  Returns 0 on success, -1 on abort/error. */
static int input_request_frame(AVFilterLink *link)
{
    FilterPriv *priv = link->src->priv;
    AVFilterBufferRef *picref;
    int64_t pts = 0;
    AVPacket pkt;
    int ret;

    /* loop until get_video_frame() yields a frame (1) or fails (<0);
       0 means the packet produced no frame and must be released */
    while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
        av_free_packet(&pkt);
    if (ret < 0)
        return -1;

    if(priv->use_dr1) {
        /* DR1: the frame already lives in a filter buffer; just re-ref it */
        picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
    } else {
        /* NOTE(review): avfilter_get_video_buffer() is dereferenced
           without a NULL check — confirm it cannot fail here */
        picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
        av_image_copy(picref->data, picref->linesize,
                      priv->frame->data, priv->frame->linesize,
                      picref->format, link->w, link->h);
    }
    /* releases pkt.data only; pkt.pos read below remains valid */
    av_free_packet(&pkt);

    picref->pts = pts;
    picref->pos = pkt.pos;
    picref->video->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
    avfilter_start_frame(link, picref);
    avfilter_draw_slice(link, 0, link->h, 1);
    avfilter_end_frame(link);

    return 0;
}
1712

    
1713
/* query_formats(): the source produces exactly the decoder's pixel
 * format, nothing else. */
static int input_query_formats(AVFilterContext *ctx)
{
    FilterPriv *priv = ctx->priv;
    enum PixelFormat fmts[] = {
        priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
    };

    avfilter_set_common_formats(ctx, avfilter_make_format_list(fmts));
    return 0;
}
1723

    
1724
/* config_props() of the output pad: propagate the decoder geometry and
 * the stream time base onto the link. */
static int input_config_props(AVFilterLink *link)
{
    FilterPriv *priv  = link->src->priv;
    AVCodecContext *dec = priv->is->video_st->codec;

    link->w = dec->width;
    link->h = dec->height;
    link->time_base = priv->is->video_st->time_base;
    return 0;
}
1735

    
1736
/* Source filter feeding decoded frames from ffplay's video stream into
 * the filter graph: no input pads, one video output pad driven by
 * input_request_frame(). */
static AVFilter input_filter =
{
    .name      = "ffplay_input",

    .priv_size = sizeof(FilterPriv),

    .init      = input_init,
    .uninit    = input_uninit,

    .query_formats = input_query_formats,

    /* pad arrays are NULL-name terminated */
    .inputs    = (AVFilterPad[]) {{ .name = NULL }},
    .outputs   = (AVFilterPad[]) {{ .name = "default",
                                    .type = AVMEDIA_TYPE_VIDEO,
                                    .request_frame = input_request_frame,
                                    .config_props  = input_config_props, },
                                  { .name = NULL }},
};
1754

    
1755
/* Build the video filter graph: ffplay_input source -> optional user
 * filter chain (vfilters string) -> ffsink output.  On success the sink
 * is stored in is->out_video_filter.
 * Returns 0 on success or a negative AVERROR code. */
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
    char sws_flags_str[128];
    int ret;
    FFSinkContext ffsink_ctx = { .pix_fmt = PIX_FMT_YUV420P };
    AVFilterContext *filt_src = NULL, *filt_out = NULL;
    snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
    graph->scale_sws_opts = av_strdup(sws_flags_str);

    if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
                                            NULL, is, graph)) < 0)
        goto the_end;
    if ((ret = avfilter_graph_create_filter(&filt_out, &ffsink, "out",
                                            NULL, &ffsink_ctx, graph)) < 0)
        goto the_end;

    if(vfilters) {
        AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
        AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));

        /* fix: both allocations were previously dereferenced unchecked */
        if (!outputs || !inputs) {
            av_freep(&outputs);
            av_freep(&inputs);
            ret = AVERROR(ENOMEM);
            goto the_end;
        }

        outputs->name    = av_strdup("in");
        outputs->filter_ctx = filt_src;
        outputs->pad_idx = 0;
        outputs->next    = NULL;

        inputs->name    = av_strdup("out");
        inputs->filter_ctx = filt_out;
        inputs->pad_idx = 0;
        inputs->next    = NULL;

        if ((ret = avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL)) < 0)
            goto the_end;
        /* NOTE(review): this frees the string through a local copy of the
           pointer; the caller must not use vfilters afterwards */
        av_freep(&vfilters);
    } else {
        if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
            goto the_end;
    }

    if ((ret = avfilter_graph_config(graph, NULL)) < 0)
        goto the_end;

    is->out_video_filter = filt_out;
the_end:
    return ret;
}
1800

    
1801
#endif  /* CONFIG_AVFILTER */
1802

    
1803
static int video_thread(void *arg)
1804
{
1805
    VideoState *is = arg;
1806
    AVFrame *frame= avcodec_alloc_frame();
1807
    int64_t pts_int;
1808
    double pts;
1809
    int ret;
1810

    
1811
#if CONFIG_AVFILTER
1812
    AVFilterGraph *graph = avfilter_graph_alloc();
1813
    AVFilterContext *filt_out = NULL;
1814
    int64_t pos;
1815

    
1816
    if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1817
        goto the_end;
1818
    filt_out = is->out_video_filter;
1819
#endif
1820

    
1821
    for(;;) {
1822
#if !CONFIG_AVFILTER
1823
        AVPacket pkt;
1824
#else
1825
        AVFilterBufferRef *picref;
1826
        AVRational tb;
1827
#endif
1828
        while (is->paused && !is->videoq.abort_request)
1829
            SDL_Delay(10);
1830
#if CONFIG_AVFILTER
1831
        ret = get_filtered_video_frame(filt_out, frame, &picref, &tb);
1832
        if (picref) {
1833
            pts_int = picref->pts;
1834
            pos     = picref->pos;
1835
            frame->opaque = picref;
1836
        }
1837

    
1838
        if (av_cmp_q(tb, is->video_st->time_base)) {
1839
            av_unused int64_t pts1 = pts_int;
1840
            pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1841
            av_dlog(NULL, "video_thread(): "
1842
                    "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1843
                    tb.num, tb.den, pts1,
1844
                    is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1845
        }
1846
#else
1847
        ret = get_video_frame(is, frame, &pts_int, &pkt);
1848
#endif
1849

    
1850
        if (ret < 0) goto the_end;
1851

    
1852
        if (!ret)
1853
            continue;
1854

    
1855
        pts = pts_int*av_q2d(is->video_st->time_base);
1856

    
1857
#if CONFIG_AVFILTER
1858
        ret = output_picture(is, frame, pts, pos);
1859
#else
1860
        ret = output_picture(is, frame, pts,  pkt.pos);
1861
        av_free_packet(&pkt);
1862
#endif
1863
        if (ret < 0)
1864
            goto the_end;
1865

    
1866
        if (step)
1867
            if (cur_stream)
1868
                stream_pause(cur_stream);
1869
    }
1870
 the_end:
1871
#if CONFIG_AVFILTER
1872
    avfilter_graph_free(&graph);
1873
#endif
1874
    av_free(frame);
1875
    return 0;
1876
}
1877

    
1878
/* Dedicated subtitle decoding thread.  Pulls packets off is->subtitleq,
 * decodes them, converts bitmap-subtitle palettes from RGBA to YUVA, and
 * stores the result in the subpicture ring buffer (is->subpq) for the
 * video refresh code to blend over the picture.  Runs until the queue is
 * aborted; always returns 0. */
static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        /* busy-wait (10 ms steps) while playback is paused */
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        /* blocking get; < 0 means the queue was aborted */
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        /* a flush packet (queued after a seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        /* wait for a free slot in the subpicture ring buffer */
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        /* write slot (only this thread advances subpq_windex) */
        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        /* decode errors are deliberately ignored (see commented-out
         * break below): a bad packet should not kill the thread */
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        /* only bitmap subtitles (format == 0) are handled here */
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            /* convert each rect's palette from RGBA to YUVA in place,
             * which is what the blending code expects */
            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}
1952

    
1953
/* copy samples for viewing in editor window */
1954
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1955
{
1956
    int size, len, channels;
1957

    
1958
    channels = is->audio_st->codec->channels;
1959

    
1960
    size = samples_size / sizeof(short);
1961
    while (size > 0) {
1962
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1963
        if (len > size)
1964
            len = size;
1965
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1966
        samples += len;
1967
        is->sample_array_index += len;
1968
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1969
            is->sample_array_index = 0;
1970
        size -= len;
1971
    }
1972
}
1973

    
1974
/* return the new audio buffer size (samples can be added or deleted
1975
   to get better sync if video or external master clock) */
1976
static int synchronize_audio(VideoState *is, short *samples,
1977
                             int samples_size1, double pts)
1978
{
1979
    int n, samples_size;
1980
    double ref_clock;
1981

    
1982
    n = 2 * is->audio_st->codec->channels;
1983
    samples_size = samples_size1;
1984

    
1985
    /* if not master, then we try to remove or add samples to correct the clock */
1986
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1987
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1988
        double diff, avg_diff;
1989
        int wanted_size, min_size, max_size, nb_samples;
1990

    
1991
        ref_clock = get_master_clock(is);
1992
        diff = get_audio_clock(is) - ref_clock;
1993

    
1994
        if (diff < AV_NOSYNC_THRESHOLD) {
1995
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1996
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1997
                /* not enough measures to have a correct estimate */
1998
                is->audio_diff_avg_count++;
1999
            } else {
2000
                /* estimate the A-V difference */
2001
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2002

    
2003
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
2004
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
2005
                    nb_samples = samples_size / n;
2006

    
2007
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2008
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
2009
                    if (wanted_size < min_size)
2010
                        wanted_size = min_size;
2011
                    else if (wanted_size > max_size)
2012
                        wanted_size = max_size;
2013

    
2014
                    /* add or remove samples to correction the synchro */
2015
                    if (wanted_size < samples_size) {
2016
                        /* remove samples */
2017
                        samples_size = wanted_size;
2018
                    } else if (wanted_size > samples_size) {
2019
                        uint8_t *samples_end, *q;
2020
                        int nb;
2021

    
2022
                        /* add samples */
2023
                        nb = (samples_size - wanted_size);
2024
                        samples_end = (uint8_t *)samples + samples_size - n;
2025
                        q = samples_end + n;
2026
                        while (nb > 0) {
2027
                            memcpy(q, samples_end, n);
2028
                            q += n;
2029
                            nb -= n;
2030
                        }
2031
                        samples_size = wanted_size;
2032
                    }
2033
                }
2034
#if 0
2035
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
2036
                       diff, avg_diff, samples_size - samples_size1,
2037
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
2038
#endif
2039
            }
2040
        } else {
2041
            /* too big difference : may be initial PTS errors, so
2042
               reset A-V filter */
2043
            is->audio_diff_avg_count = 0;
2044
            is->audio_diff_cum = 0;
2045
        }
2046
    }
2047

    
2048
    return samples_size;
2049
}
2050

    
2051
/* decode one audio frame and returns its uncompressed size
   Blocks until a frame is decoded or the stream is paused/aborted
   (returns -1 in that case).  Stores the decoded PTS in *pts_ptr and
   leaves the decoded data in is->audio_buf (S16 interleaved, converting
   from the decoder's sample format if necessary). */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;   /* cursor into current pkt */
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            /* advance past the consumed bytes */
            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            /* (re)create the converter when the decoder output format
               differs from what we last saw */
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        av_get_sample_fmt_name(dec->sample_fmt),
                        av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            /* convert audio_buf1 -> audio_buf2 (S16) when needed */
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;     /* bytes per S16 sample frame */
            /* advance the audio clock by the duration of this chunk */
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        /* a flush packet (queued after a seek) resets decoder state */
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
2153

    
2154
/* get the current audio output buffer size, in samples. With SDL, we
2155
   cannot have a precise information */
2156
static int audio_write_get_buf_size(VideoState *is)
2157
{
2158
    return is->audio_buf_size - is->audio_buf_index;
2159
}
2160

    
2161

    
2162
/* prepare a new audio buffer
   SDL audio callback: must fill exactly 'len' bytes of 'stream'.  Pulls
   decoded data from audio_decode_frame() as needed, applying A-V sync
   correction, and falls back to silence on decode failure so the device
   never starves. */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    /* timestamp the callback for audio clock extrapolation */
    audio_callback_time = av_gettime();

    while (len > 0) {
        /* refill is->audio_buf when the previous chunk is exhausted */
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               /* feed the waveform/spectrum display if it is active */
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               /* may shrink/grow the chunk to correct clock drift */
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        /* copy as much as fits of the current chunk into SDL's buffer */
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
2197

    
2198
/* open a given stream. Return 0 if OK
   Finds and opens the decoder for ic->streams[stream_index], applies the
   global command-line codec options, and starts the matching consumer:
   SDL audio device for audio, video_thread for video, subtitle_thread
   for subtitles.  Returns -1 on any failure. */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* ask the decoder for stereo downmix when more channels exist */
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    /* apply the global decoding options from the command line */
    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    /* lowres decoding produces frames without padded edges */
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avctx->thread_count= thread_count;

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        /* SDL is fed S16; conversions are set up lazily in
           audio_decode_frame() if the decoder outputs another format */
        is->audio_src_fmt= AV_SAMPLE_FMT_S16;
    }

    /* re-enable demuxing for this stream (all start as DISCARD_ALL) */
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise anough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
2297

    
2298
/* Undo stream_component_open(): abort the stream's packet queue, unblock
 * and join the consumer thread (or close the SDL audio device), free the
 * queue, close the codec and clear the VideoState references.  The
 * abort -> signal -> join -> free ordering is what guarantees the
 * consumer thread exits before its resources are torn down. */
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        /* stops the SDL callback; no thread of ours to join */
        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        is->reformat_ctx = NULL;
        break;
    case AVMEDIA_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    /* stop demuxing this stream and release the codec */
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case AVMEDIA_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
2369

    
2370
/* since we have only one decoding thread, we can use a global
2371
   variable instead of a thread local variable */
2372
static VideoState *global_video_state;
2373

    
2374
static int decode_interrupt_cb(void)
2375
{
2376
    return (global_video_state && global_video_state->abort_request);
2377
}
2378

    
2379
/* this thread gets the stream from the disk or the network
   Demuxer thread: opens the input, selects and opens the best
   audio/video/subtitle streams, then loops reading packets and routing
   them to the per-stream queues, handling pause, seek and EOF/loop along
   the way.  On fatal error it posts FF_QUIT_EVENT to the main loop. */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[AVMEDIA_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;
    int pkt_in_play_range = 0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    /* let the interrupt callback reach this player instance */
    global_video_state = is;
    avio_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    /* hints for formats (e.g. rawvideo) that need them */
    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* default seek-by-bytes for formats with discontinuous timestamps */
    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* discard everything; stream_component_open() re-enables per stream */
    for (i = 0; i < ic->nb_streams; i++)
        ic->streams[i]->discard = AVDISCARD_ALL;
    if (!video_disable)
        st_index[AVMEDIA_TYPE_VIDEO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
                                wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
    if (!audio_disable)
        st_index[AVMEDIA_TYPE_AUDIO] =
            av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
                                wanted_stream[AVMEDIA_TYPE_AUDIO],
                                st_index[AVMEDIA_TYPE_VIDEO],
                                NULL, 0);
    /* NOTE(review): this gates subtitles on video_disable, not a
       subtitle_disable flag — looks intentional (subs need video) but
       worth confirming */
    if (!video_disable)
        st_index[AVMEDIA_TYPE_SUBTITLE] =
            av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
                                wanted_stream[AVMEDIA_TYPE_SUBTITLE],
                                (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
                                 st_index[AVMEDIA_TYPE_AUDIO] :
                                 st_index[AVMEDIA_TYPE_VIDEO]),
                                NULL, 0);
    if (show_status) {
        av_dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
    }
    is->refresh_tid = SDL_CreateThread(refresh_thread, is);
    if(ret<0) {
        /* no video: show the audio waveform/spectrum instead */
        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    /* main demux loop */
    for(;;) {
        if (is->abort_request)
            break;
        /* propagate pause state changes to the protocol layer */
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                /* drop stale packets and tell decoders to flush */
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queue are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(eof) {
            /* queue an empty packet so the video decoder drains */
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            /* everything consumed: loop back or exit */
            if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                if(loop!=1 && (!loop || --loop)){
                    stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
                }else if(autoexit){
                    ret=AVERROR_EOF;
                    goto fail;
                }
            }
            eof=0;
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF || url_feof(ic->pb))
                eof=1;
            if (ic->pb && ic->pb->error)
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        /* check if packet is in play range specified by user, then queue, otherwise discard */
        pkt_in_play_range = duration == AV_NOPTS_VALUE ||
                (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
                av_q2d(ic->streams[pkt->stream_index]->time_base) -
                (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
                <= ((double)duration/1000000);
        if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    avio_set_interrupt_cb(NULL);

    /* tell the main loop we died so it can shut the player down */
    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
2630

    
2631
/* Allocate and initialize a VideoState for the given input and start the
 * demuxing thread.  Returns NULL on allocation or thread-creation failure;
 * on success the caller owns the returned structure (freed via do_exit /
 * stream_close paths elsewhere in this file). */
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        /* fix: do not leak the SDL synchronization primitives created above
         * when the demuxing thread cannot be started */
        SDL_DestroyCond(is->subpq_cond);
        SDL_DestroyMutex(is->subpq_mutex);
        SDL_DestroyCond(is->pictq_cond);
        SDL_DestroyMutex(is->pictq_mutex);
        av_free(is);
        return NULL;
    }
    return is;
}
2658

    
2659
/* Switch the active stream of the given type (audio/video/subtitle) to the
 * next usable stream of that type in the file, wrapping around.  For
 * subtitles, wrapping past the last stream disables subtitles (index -1)
 * instead of cycling forever. */
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == AVMEDIA_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == AVMEDIA_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    /* nothing to cycle if no stream of this type is active; subtitles may
     * legitimately start from the "disabled" state (-1) */
    if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == AVMEDIA_TYPE_SUBTITLE)
            {
                /* wrapped past the end: turn subtitles off */
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        /* completed a full cycle without finding an alternative */
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                /* only accept audio streams with usable parameters */
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case AVMEDIA_TYPE_VIDEO:
            case AVMEDIA_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    /* close the old component first, then open the chosen one (or close
     * only, when stream_index is -1 for subtitles) */
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
2707

    
2708

    
2709
static void toggle_full_screen(void)
2710
{
2711
    is_full_screen = !is_full_screen;
2712
    if (!fs_screen_width) {
2713
        /* use default SDL method */
2714
//        SDL_WM_ToggleFullScreen(screen);
2715
    }
2716
    video_open(cur_stream);
2717
}
2718

    
2719
static void toggle_pause(void)
2720
{
2721
    if (cur_stream)
2722
        stream_pause(cur_stream);
2723
    step = 0;
2724
}
2725

    
2726
static void step_to_next_frame(void)
2727
{
2728
    if (cur_stream) {
2729
        /* if the stream is paused unpause it, then step */
2730
        if (cur_stream->paused)
2731
            stream_pause(cur_stream);
2732
    }
2733
    step = 1;
2734
}
2735

    
2736
static void toggle_audio_display(void)
2737
{
2738
    if (cur_stream) {
2739
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2740
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2741
        fill_rectangle(screen,
2742
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2743
                    bgcolor);
2744
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2745
    }
2746
}
2747

    
2748
/* handle an event sent by the GUI */
2749
/* handle an event sent by the GUI */
/* Main SDL event loop: dispatches keyboard, mouse, resize, quit and the
 * custom FF_* user events.  Never returns normally; termination goes
 * through do_exit(). */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            if (exit_on_keydown) {
                do_exit();
                break;
            }
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            /* arrow keys seek relative to the current position; incr is in
             * seconds here and is converted to bytes below when seeking by
             * bytes */
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        /* pick the best known byte position: last decoded
                         * video position, then last audio packet, then the
                         * raw I/O position */
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = avio_tell(cur_stream->ic->pb);
                        /* convert the seconds increment to bytes using the
                         * container bitrate, or a fallback guess */
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        /* time-based seek relative to the master clock */
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (exit_on_mousedown) {
                do_exit();
                break;
            }
            /* fall through: a click is handled like a motion event with the
             * button pressed */
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                /* seek to the fraction of the file given by the horizontal
                 * click position */
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  avio_size(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            /* the video thread asked the main thread to (re)allocate the
             * display picture; data1 is the VideoState */
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            /* clear the flag so a new refresh event may be scheduled */
            cur_stream->refresh=0;
            break;
        default:
            break;
        }
    }
}
2894

    
2895
static void opt_frame_size(const char *arg)
2896
{
2897
    if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2898
        fprintf(stderr, "Incorrect frame size\n");
2899
        exit(1);
2900
    }
2901
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2902
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2903
        exit(1);
2904
    }
2905
}
2906

    
2907
static int opt_width(const char *opt, const char *arg)
2908
{
2909
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2910
    return 0;
2911
}
2912

    
2913
static int opt_height(const char *opt, const char *arg)
2914
{
2915
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2916
    return 0;
2917
}
2918

    
2919
static void opt_format(const char *arg)
2920
{
2921
    file_iformat = av_find_input_format(arg);
2922
    if (!file_iformat) {
2923
        fprintf(stderr, "Unknown input format: %s\n", arg);
2924
        exit(1);
2925
    }
2926
}
2927

    
2928
static void opt_frame_pix_fmt(const char *arg)
2929
{
2930
    frame_pix_fmt = av_get_pix_fmt(arg);
2931
}
2932

    
2933
static int opt_sync(const char *opt, const char *arg)
2934
{
2935
    if (!strcmp(arg, "audio"))
2936
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2937
    else if (!strcmp(arg, "video"))
2938
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2939
    else if (!strcmp(arg, "ext"))
2940
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2941
    else {
2942
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2943
        exit(1);
2944
    }
2945
    return 0;
2946
}
2947

    
2948
static int opt_seek(const char *opt, const char *arg)
2949
{
2950
    start_time = parse_time_or_die(opt, arg, 1);
2951
    return 0;
2952
}
2953

    
2954
static int opt_duration(const char *opt, const char *arg)
2955
{
2956
    duration = parse_time_or_die(opt, arg, 1);
2957
    return 0;
2958
}
2959

    
2960
static int opt_debug(const char *opt, const char *arg)
2961
{
2962
    av_log_set_level(99);
2963
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2964
    return 0;
2965
}
2966

    
2967
static int opt_vismv(const char *opt, const char *arg)
2968
{
2969
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2970
    return 0;
2971
}
2972

    
2973
static int opt_thread_count(const char *opt, const char *arg)
2974
{
2975
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2976
#if !HAVE_THREADS
2977
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2978
#endif
2979
    return 0;
2980
}
2981

    
2982
/* Command line options understood by ffplay.  OPT_BOOL / OPT_INT /
 * OPT_STRING entries store directly into the referenced global; plain
 * HAS_ARG entries call a void(const char *) handler; OPT_FUNC2 entries
 * call an int(const char *, const char *) handler. */
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "t", HAS_ARG | OPT_FUNC2, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
    { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
    { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
    { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
    { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
#if CONFIG_AVFILTER
    { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
#endif
    { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { "i", OPT_DUMMY, {NULL}, "ffmpeg compatibility dummy option", ""},
    { NULL, }, /* sentinel: terminates the table for parse_options() */
};
3029

    
3030
/* Print the one-line usage summary (also used by the error path when no
 * input file is given). */
static void show_usage(void)
{
    printf("Simple media player\n"
           "usage: ffplay [options] input_file\n"
           "\n");
}
3036

    
3037
/* Print the full -h help text: usage line, the option tables and the
 * interactive key bindings. */
static void show_help(void)
{
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\n");
    /* generic AVOption help for the codec and format contexts */
    av_opt_show2(avcodec_opts[0], NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
    printf("\n");
    av_opt_show2(avformat_opts, NULL,
                 AV_OPT_FLAG_DECODING_PARAM, 0);
#if !CONFIG_AVFILTER
    /* the scaler options only apply when not going through libavfilter */
    printf("\n");
    av_opt_show2(sws_opts, NULL,
                 AV_OPT_FLAG_ENCODING_PARAM, 0);
#endif
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "s                   activate frame-step mode\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}
3070

    
3071
static void opt_input_file(const char *filename)
3072
{
3073
    if (input_filename) {
3074
        fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3075
                filename, input_filename);
3076
        exit(1);
3077
    }
3078
    if (!strcmp(filename, "-"))
3079
        filename = "pipe:";
3080
    input_filename = filename;
3081
}
3082

    
3083
/* Called from the main */
3084
/* Program entry point: register libraries, parse options, initialize SDL,
 * open the input stream and run the event loop (which never returns;
 * termination happens via do_exit()). */
int main(int argc, char **argv)
{
    int flags;

    av_log_set_flags(AV_LOG_SKIP_REPEATED);

    /* register all codecs, demux and protocols */
    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
#if CONFIG_AVFILTER
    avfilter_register_all();
#endif
    av_register_all();

    init_opts();

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    /* with -nodisp there is nothing to render video on */
    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        /* remember the desktop size for full-screen mode */
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    /* ignore event classes ffplay never handles */
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    /* sentinel packet pushed into the queues on seek; presumably recognized
     * by pointer identity in the queue code — confirm against
     * packet_queue_put/get */
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}