/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (20 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
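
/* Illustrative sketch (editor's note, not part of the original ffplay.c):
   the intended producer/consumer pattern for PacketQueue. A demuxer thread
   pushes packets with packet_queue_put() while a decoder thread blocks in
   packet_queue_get(); packet_queue_abort() wakes any waiter so it can exit.
   Kept inside #if 0 so it is not compiled. */
#if 0
static void packet_queue_usage_example(void)
{
    PacketQueue q;
    AVPacket pkt;

    packet_queue_init(&q);

    /* producer side (demuxer thread): enqueue a packet */
    av_init_packet(&pkt);
    packet_queue_put(&q, &pkt);

    /* consumer side (decoder thread): block until a packet or an abort */
    if (packet_queue_get(&q, &pkt, 1) > 0)
        av_free_packet(&pkt);

    /* shutdown: unblock any waiter, then free all queued packets */
    packet_queue_abort(&q);
    packet_queue_end(&q);
}
#endif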

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}
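
/* Worked example (editor's note, not in the original source): with shift
   s = 0, ALPHA_BLEND(128, 0, 255, 0) = (0 * 127 + 255 * 128) / 255 = 128,
   i.e. an alpha of 128 mixes the old and new pixel values roughly 50/50.
   The shift parameter lets the same macro blend against 2 or 4 accumulated
   chroma samples (s = 1 or s = 2), as done in blend_subrect() below. */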


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}
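
/* Editor's note (not in the original source): unlike the C '%' operator,
   compute_mod() always returns a value in [0, b). For example
   compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3, which is
   what the waveform display below relies on when it walks the sample ring
   buffer backwards from the current write position. */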

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
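
/* Editor's note (not in the original source): for 16-bit stereo audio at
   44100 Hz, bytes_per_sec is 2 * 2 * 44100 = 176400. If 8192 bytes are
   still waiting in the output buffer, the clock reported above is pulled
   back by 8192 / 176400 ~= 46 ms, i.e. it estimates the time of the sample
   currently being heard rather than the last one decoded. */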

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
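
/* Editor's note (not in the original source): for 25 fps material the
   nominal delay is 0.04 s, so sync_threshold = FFMAX(0.01, 0.04) = 0.04.
   If the frame PTS is 50 ms behind the master clock (diff = -0.05) the
   delay is forced to 0 and the frame is shown immediately; if it is 50 ms
   ahead the delay is doubled, which keeps the previous frame on screen for
   one extra period. */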

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the synchro */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
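
/* Editor's note (not in the original source): with 16-bit stereo at
   44100 Hz (n = 4) and audio running 10 ms ahead of the master clock
   (diff = 0.01), wanted_size grows by (int)(0.01 * 44100) * 4 = 1764 bytes,
   clamped to at most SAMPLE_CORRECTION_PERCENT_MAX (10%) of the buffer, so
   the correction is spread over several callbacks instead of producing an
   audible jump. */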
1541

    
1542
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if a pts is available, update the audio clock with it */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in samples. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}

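/* Note (added summary): SDL runs the callback below on its own audio thread
 * and asks for exactly `len` bytes. The loop refills is->audio_buf from
 * audio_decode_frame() whenever the previous buffer has been fully copied
 * out; on decode failure it substitutes 1024 bytes of silence so playback
 * never stalls. synchronize_audio() may shrink or grow the returned sample
 * count slightly to keep the audio clock aligned with the master clock. */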
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

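        /* Added summary: the A-V difference is smoothed with an exponentially
         * weighted average. The coefficient below is chosen so that after
         * AUDIO_DIFF_AVG_NB (20) samples the weight has decayed to about 1%
         * (exp(log(0.01)/20) ~= 0.794), i.e. roughly the last 20 measured
         * differences dominate the estimate used by synchronize_audio(). The
         * threshold corresponds to two SDL audio buffers expressed in
         * seconds, since the SDL buffer fill level is not known precisely. */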
        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this condition to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this condition to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

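/* Overview (added summary): the demux thread below opens the input, selects
 * the requested audio/video/subtitle streams, then loops reading packets with
 * av_read_frame() and routing them into the per-stream packet queues. It
 * throttles itself while the queues are full, services seek requests by
 * flushing the queues and pushing flush_pkt, and keeps feeding an empty video
 * packet at EOF so the decoder can flush its delayed frames. */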
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
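        /* Added note: seek requests are handled here, on the demux thread.
         * After avformat_seek_file() succeeds, every open packet queue is
         * flushed and a flush_pkt sentinel is queued; the decoder threads
         * compare pkt->data against flush_pkt.data and call
         * avcodec_flush_buffers() when they see it (see audio_decode_frame()
         * above), so stale frames from before the seek are dropped. */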
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

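/* Added summary: stream_open() only sets up state and synchronization
 * primitives, schedules the first video refresh 40 ms out, and spawns
 * decode_thread(); the actual demuxing and decoding happen on that thread
 * while the caller returns immediately to run the SDL event loop. */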
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

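/* Added summary: cycle to the next stream of the given type. The scan starts
 * just after the currently open stream index and wraps around; for subtitles
 * it may wrap to "no stream" (index -1) so the user can turn subtitles off.
 * Audio candidates are skipped unless they advertise a sample rate and a
 * channel count. The old component is closed before the new one is opened. */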
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}

static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

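/* Added note: all SDL calls that touch the display are kept on this thread.
 * Other threads communicate through custom user events (FF_ALLOC_EVENT,
 * FF_REFRESH_EVENT, FF_QUIT_EVENT, defined earlier in this file) pushed with
 * SDL_PushEvent(); this loop dispatches them to video_open()/alloc_picture(),
 * video_refresh_timer() and do_exit() respectively. Keyboard and mouse input
 * is mapped to the playback controls listed in show_help(). */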
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

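/* Added note: the command-line option table. Entries either point straight at
 * a global variable (OPT_BOOL / OPT_INT) or at a handler callback (OPT_FUNC2
 * entries and plain callbacks such as opt_format above); the same table
 * drives show_help_options() below, which splits entries into "Main" and
 * "Advanced" groups using the OPT_EXPERT flag. */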
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_help();
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

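    /* Added note: flush_pkt is a sentinel packet. Its data pointer is what
     * identifies it; after a seek it is pushed into each packet queue and the
     * decoders compare pkt->data against flush_pkt.data to know when to call
     * avcodec_flush_buffers() (see audio_decode_frame() and decode_thread()). */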
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}