ffmpeg / ffplay.c @ 7d9beec7

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavformat/rtsp.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

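/* Global playback state for one open media file: demuxer context, per-stream
   decoders, packet/picture/subtitle queues and the clocks used for A/V sync. */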
typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

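/* Alpha-blend newp over oldp with opacity a (0..255); newp holds the sum of
   2^s source samples, s being the accumulation shift used for the chroma planes. */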
#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

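/* positive modulo: wrap a (possibly negative) index into [0, b) */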
static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

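/* video decoder thread: pulls packets from the video queue, decodes them,
   derives a pts for each frame and queues the pictures for display */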
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

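/* subtitle decoder thread: decodes subtitle packets and converts their
   palettes from RGBA to YUVA so they can be blended onto the video overlay */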
static int subtitle_thread(void *arg)
1387
{
1388
    VideoState *is = arg;
1389
    SubPicture *sp;
1390
    AVPacket pkt1, *pkt = &pkt1;
1391
    int len1, got_subtitle;
1392
    double pts;
1393
    int i, j;
1394
    int r, g, b, y, u, v, a;
1395

    
1396
    for(;;) {
1397
        while (is->paused && !is->subtitleq.abort_request) {
1398
            SDL_Delay(10);
1399
        }
1400
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1401
            break;
1402

    
1403
        if(pkt->data == flush_pkt.data){
1404
            avcodec_flush_buffers(is->subtitle_st->codec);
1405
            continue;
1406
        }
1407
        SDL_LockMutex(is->subpq_mutex);
1408
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1409
               !is->subtitleq.abort_request) {
1410
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1411
        }
1412
        SDL_UnlockMutex(is->subpq_mutex);
1413

    
1414
        if (is->subtitleq.abort_request)
1415
            goto the_end;
1416

    
1417
        sp = &is->subpq[is->subpq_windex];
1418

    
1419
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1420
           this packet, if any */
1421
        pts = 0;
1422
        if (pkt->pts != AV_NOPTS_VALUE)
1423
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1424

    
1425
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1426
                                    &sp->sub, &got_subtitle,
1427
                                    pkt);
1428
//            if (len1 < 0)
1429
//                break;
1430
        if (got_subtitle && sp->sub.format == 0) {
1431
            sp->pts = pts;
1432

    
1433
            for (i = 0; i < sp->sub.num_rects; i++)
1434
            {
1435
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1436
                {
1437
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1438
                    y = RGB_TO_Y_CCIR(r, g, b);
1439
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1440
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1441
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1442
                }
1443
            }
1444

    
1445
            /* now we can update the picture count */
1446
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1447
                is->subpq_windex = 0;
1448
            SDL_LockMutex(is->subpq_mutex);
1449
            is->subpq_size++;
1450
            SDL_UnlockMutex(is->subpq_mutex);
1451
        }
1452
        av_free_packet(pkt);
1453
//        if (step)
1454
//            if (cur_stream)
1455
//                stream_pause(cur_stream);
1456
    }
1457
 the_end:
1458
    return 0;
1459
}
1460

    
1461
/* copy samples for viewing in editor window */
1462
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1463
{
1464
    int size, len, channels;
1465

    
1466
    channels = is->audio_st->codec->channels;
1467

    
1468
    size = samples_size / sizeof(short);
1469
    while (size > 0) {
1470
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1471
        if (len > size)
1472
            len = size;
1473
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1474
        samples += len;
1475
        is->sample_array_index += len;
1476
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1477
            is->sample_array_index = 0;
1478
        size -= len;
1479
    }
1480
}
1481

    
1482
/* return the new audio buffer size (samples can be added or deleted
1483
   to get better sync if video or external master clock) */
1484
static int synchronize_audio(VideoState *is, short *samples,
1485
                             int samples_size1, double pts)
1486
{
1487
    int n, samples_size;
1488
    double ref_clock;
1489

    
1490
    n = 2 * is->audio_st->codec->channels;
1491
    samples_size = samples_size1;
1492

    
1493
    /* if not master, then we try to remove or add samples to correct the clock */
1494
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1495
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1496
        double diff, avg_diff;
1497
        int wanted_size, min_size, max_size, nb_samples;
1498

    
1499
        ref_clock = get_master_clock(is);
1500
        diff = get_audio_clock(is) - ref_clock;
1501

    
1502
        if (diff < AV_NOSYNC_THRESHOLD) {
1503
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1504
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1505
                /* not enough measures to have a correct estimate */
1506
                is->audio_diff_avg_count++;
1507
            } else {
1508
                /* estimate the A-V difference */
1509
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1510

    
1511
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1512
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1513
                    nb_samples = samples_size / n;
1514

    
1515
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1516
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1517
                    if (wanted_size < min_size)
1518
                        wanted_size = min_size;
1519
                    else if (wanted_size > max_size)
1520
                        wanted_size = max_size;
1521

    
1522
                    /* add or remove samples to correction the synchro */
1523
                    if (wanted_size < samples_size) {
1524
                        /* remove samples */
1525
                        samples_size = wanted_size;
1526
                    } else if (wanted_size > samples_size) {
1527
                        uint8_t *samples_end, *q;
1528
                        int nb;
1529

    
1530
                        /* add samples */
1531
                        nb = (samples_size - wanted_size);
1532
                        samples_end = (uint8_t *)samples + samples_size - n;
1533
                        q = samples_end + n;
1534
                        while (nb > 0) {
1535
                            memcpy(q, samples_end, n);
1536
                            q += n;
1537
                            nb -= n;
1538
                        }
1539
                        samples_size = wanted_size;
1540
                    }
1541
                }
1542
#if 0
1543
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1544
                       diff, avg_diff, samples_size - samples_size1,
1545
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1546
#endif
1547
            }
1548
        } else {
1549
            /* too big difference : may be initial PTS errors, so
1550
               reset A-V filter */
1551
            is->audio_diff_avg_count = 0;
1552
            is->audio_diff_cum = 0;
1553
        }
1554
    }
1555

    
1556
    return samples_size;
1557
}

/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the pts if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
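
/* Clock bookkeeping in audio_decode_frame(): the data handed back is always
 * 16-bit interleaved PCM, so one second of audio is 2 * channels *
 * sample_rate bytes. The audio clock therefore advances by
 * data_size / (2 * channels * sample_rate) seconds per decoded chunk, and is
 * re-anchored from the packet pts whenever the packet carries one. */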

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
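
/* sdl_audio_callback() below is the pull side of audio playback: SDL calls
 * it from its own audio thread whenever the device needs another `len`
 * bytes. It refills is->audio_buf by decoding frames (passing them through
 * synchronize_audio() first) and, if decoding fails, substitutes a short
 * block of silence so the device is always fed something. */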

/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
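
/* stream_component_open() below does the per-stream setup: it finds the
 * decoder, copies the command-line decoder settings (lowres, idct, skip_*,
 * error handling, thread count) onto the codec context, opens the codec,
 * then wires the stream into the player: SDL audio output plus the sync
 * averaging filter for audio streams, and a packet queue plus decoder
 * thread for video and subtitle streams. */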

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: request at most 2 output channels from the decoder */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
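
/* A note on the audio sync parameters initialised above: audio_diff_avg_coef
 * = exp(log(0.01) / AUDIO_DIFF_AVG_NB) makes the running A-V difference an
 * exponential average in which the oldest of the last AUDIO_DIFF_AVG_NB
 * measurements weighs about 1%. audio_diff_threshold is
 * 2 * SDL_AUDIO_BUFFER_SIZE / sample_rate, i.e. roughly two SDL callback
 * buffers expressed in seconds; smaller drifts are treated as measurement
 * noise and left uncorrected. */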

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
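
/* Teardown order in stream_component_close() matters: the packet queue is
 * aborted first so a blocked decoder wakes up, the condition variable is
 * signalled (or SDL audio closed) so the worker cannot wait forever, the
 * worker thread is joined, and only then are the queue and the codec
 * released. The stream is switched to AVDISCARD_ALL so the demuxer drops
 * further packets for it. */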

static void dump_stream_info(const AVFormatContext *s)
{
    AVMetadataTag *tag = NULL;
    while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
        fprintf(stderr, "%s: %s\n", tag->key, tag->value);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
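
/* decode_thread() below is the demuxer loop. It opens the input file,
 * optionally seeks to the requested start time, picks one audio, video and
 * subtitle stream (honouring the -ast/-vst/-sst options), opens them with
 * stream_component_open(), and then keeps calling av_read_frame(), routing
 * each packet to the matching per-stream queue. It also services pause,
 * user seek requests and end of file. */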

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
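
/* Two details of the read loop above are easy to miss: back-pressure and
 * draining. Reading pauses for 10 ms whenever any packet queue exceeds its
 * MAX_*Q_SIZE budget, so memory use stays bounded however fast the input
 * can be read. At end of file an empty packet is pushed onto the video
 * queue so the decoder can flush out any frames it is still holding. */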

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
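
/* stream_open() only sets the machinery in motion: it allocates the
 * VideoState, creates the picture/subpicture mutexes and condition
 * variables, schedules the first display refresh 40 ms out and starts
 * decode_thread(). All demuxing and decoding then happens on worker
 * threads, while the main thread stays in event_loop() handling SDL events
 * and refresh timers. */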

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
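
/* Channel cycling scans forward from the current stream index, wrapping
 * around, and stops at the next stream of the requested type. Audio
 * candidates must report a sample rate and channel count; for subtitles,
 * wrapping past the last stream selects "no subtitles" (index -1) before
 * the scan starts over on the next key press. */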

static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}
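
/* event_loop() below runs on the main thread and blocks in SDL_WaitEvent().
 * Keyboard events map to the actions listed in show_help(): q/ESC quits, f
 * toggles full screen, p/SPC pauses, s steps one frame, a/v/t cycle streams,
 * w toggles the waveform display, and the arrow keys seek by 10 or 60
 * seconds. A mouse click seeks to the clicked fraction of the total
 * duration, and the FF_ALLOC/FF_REFRESH user events drive picture
 * allocation and display refresh. */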

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}
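
/* Start-up in main() below follows the usual libav* pattern: register
 * codecs, devices and (de)muxers, allocate the option contexts used by the
 * generic option handling, parse the command line, initialise SDL (video,
 * audio and timer), then hand control to stream_open() and event_loop().
 * do_exit() is what actually terminates the process, so the final return is
 * never reached. */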

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demux and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}