
ffmpeg / ffplay.c @ 1e1a0b18

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

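/* A PacketQueue is the hand-off point between the demuxing thread, which
   produces demuxed packets, and the audio, video and subtitle decoding
   threads, which consume them.  Access is serialized with an SDL mutex and
   condition variable; abort_request lets a reader blocked in
   packet_queue_get() return promptly when the stream is being closed. */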
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

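/* blend_subrect() alpha-blends one palettized subtitle rectangle onto the
   YUV420 destination picture: luma is blended per pixel, while each chroma
   sample covers a 2x2 block, so the partially covered edge rows and columns
   (odd dstx, dsty, width or height) are handled by the separate branches
   below. */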
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

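/* Clock note: is->audio_clock is the PTS at the end of the data most
   recently decoded, so get_audio_clock() works back from it by subtracting
   the playing time of the data still waiting in the output buffer. */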
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

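/* Video synchronisation: the nominal delay is the PTS difference between
   the current and previous frame.  When audio (or the external clock) is
   master, the frame PTS is compared against the master clock: a frame that
   is late beyond the sync threshold is shown immediately (delay = 0), and
   one that is early has its delay doubled, which in effect drops or repeats
   display time to pull the video back in step. */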
static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long-term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}

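/* Called for each display refresh: shows at most one queued picture,
   re-arms the refresh timer via schedule_refresh() with the delay computed
   above, and retires subtitles whose display interval has ended.  With an
   audio-only stream it simply redraws the waveform at a fixed rate. */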
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (this must be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

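/* Audio synchronisation: when audio is not the master clock, each decoded
   chunk may be shrunk or grown (by at most SAMPLE_CORRECTION_PERCENT_MAX
   percent) according to an exponentially weighted average of the
   audio-vs-master clock difference, so the speed change stays small. */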
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* difference is too big: probably initial PTS errors, so
               reset the A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}

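/* The decoder below pulls packets from the audio queue; one packet may hold
   several frames, so pkt_temp tracks the unconsumed part of the current
   packet.  Output is converted to signed 16-bit when the codec produces a
   different sample format, and is->audio_clock is advanced by the duration
   of every frame returned. */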
/* decode one audio frame and return its uncompressed size */
1559
static int audio_decode_frame(VideoState *is, double *pts_ptr)
1560
{
1561
    AVPacket *pkt_temp = &is->audio_pkt_temp;
1562
    AVPacket *pkt = &is->audio_pkt;
1563
    AVCodecContext *dec= is->audio_st->codec;
1564
    int n, len1, data_size;
1565
    double pts;
1566

    
1567
    for(;;) {
1568
        /* NOTE: the audio packet can contain several frames */
1569
        while (pkt_temp->size > 0) {
1570
            data_size = sizeof(is->audio_buf1);
1571
            len1 = avcodec_decode_audio3(dec,
1572
                                        (int16_t *)is->audio_buf1, &data_size,
1573
                                        pkt_temp);
1574
            if (len1 < 0) {
1575
                /* if error, we skip the frame */
1576
                pkt_temp->size = 0;
1577
                break;
1578
            }
1579

    
1580
            pkt_temp->data += len1;
1581
            pkt_temp->size -= len1;
1582
            if (data_size <= 0)
1583
                continue;
1584

    
1585
            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the pts, if present */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


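/* sdl_audio_callback() runs in SDL's audio thread and must always provide
   exactly 'len' bytes: it refills audio_buf by decoding as needed, and on a
   decode error it substitutes 1024 bytes of silence so playback never
   starves. */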
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

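/* stream_component_open() also applies the command line decoding options
   (lowres, skip_*, error handling, thread count) to the codec context, opens
   the SDL audio device for audio streams, and starts the video or subtitle
   decoding thread for those stream types. */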
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

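        /* audio_diff_avg_coef = 0.01^(1/AUDIO_DIFF_AVG_NB) ~= 0.794, the
           decay factor of the exponential average of A-V differences used by
           synchronize_audio(): a measurement AUDIO_DIFF_AVG_NB (20) calls old
           has decayed to about 1% of its original weight */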
        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    AVMetadataTag *tag = NULL;
    while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
        fprintf(stderr, "%s: %s\n", tag->key, tag->value);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

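/* decode_thread() is the demuxer thread: it opens the input, selects one
   stream of each type, opens their components, and then loops reading
   packets into the per-stream queues while throttling on queue size and
   servicing pause and seek requests. */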
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if a seek was requested on the command line, execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
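        /* a seek requested from the event loop: seek the demuxer, then flush
           every packet queue and queue a flush_pkt so the decoders discard
           whatever they buffered before the seek point */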
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

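/* stream_cycle_channel() switches to the next stream of the requested type,
   wrapping around; for subtitles an index of -1 (subtitles off) is part of
   the cycle, and audio streams are only accepted if they report a sample
   rate and channel count. */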
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

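/* The key bindings handled here are the ones listed by show_help(): arrow
   keys seek relative to the current position (+-10 seconds or +-1 minute)
   and a mouse click seeks to the clicked fraction of the total duration. */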
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

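/* Option table interpreted by parse_options() (see cmdutils.c): OPT_BOOL and
   OPT_INT entries write the parsed value into the referenced variable, while
   HAS_ARG | OPT_FUNC2 entries call the given (opt, arg) callback. */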
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

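    /* flush_pkt is a sentinel packet: the decoders recognize it by comparing
       data pointers (pkt->data == flush_pkt.data), so the actual contents of
       the buffer are irrelevant */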
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}