ffmpeg / ffplay.c @ 40ccc754

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (20 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024
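/* For example, at a 44100 Hz sample rate a 1024-sample buffer corresponds to
   roughly 23 ms of audio per callback, which bounds how far the audio clock
   computed below can lag behind what the sound card is actually playing. */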

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
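
/* Illustrative use of the queue above, mirroring what the decoder threads
   further down do (a minimal sketch, not a function that exists in this
   file): a producer calls packet_queue_put(), a consumer blocks in
   packet_queue_get(..., 1), and packet_queue_abort() wakes the consumer up
   so it can exit.

       static int consumer_thread(PacketQueue *q)
       {
           AVPacket pkt;
           for (;;) {
               if (packet_queue_get(q, &pkt, 1) < 0)
                   break;            // abort_request was set, time to quit
               // ... decode pkt here ...
               av_free_packet(&pkt); // the consumer owns the dequeued packet
           }
           return 0;
       }
*/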

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
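
/* Example: with s = 0, a = 0 leaves oldp untouched and a = 255 yields newp;
   a = 128, oldp = 0, newp = 255 gives (0*127 + 255*128) / 255 = 128, i.e. an
   (almost exact) 50/50 blend.  The shift s lets newp be the *sum* of 2^s
   subsampled chroma values, as in the cb/cr blending done below. */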

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
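
        /* The search below appears to act like an oscilloscope trigger: it
           scans back over up to 1000 array positions for the spot with the
           largest drop between offsets 0 and 9 that also has a sign change
           between offsets 4 and 5 (a zero crossing), so the waveform is
           drawn from a consistent phase and does not jitter horizontally. */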

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
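
/* Worked example: for 44100 Hz stereo 16-bit audio, bytes_per_sec is
   44100 * 2 * 2 = 176400.  If 8192 bytes are still queued towards the
   device, the clock is pulled back by 8192 / 176400, roughly 46 ms, so it
   approximates the time stamp of the sample currently being heard rather
   than of the last sample handed to SDL. */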

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
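
/* Worked example of the skip/repeat logic above, assuming 25 fps video
   slaved to the audio clock: the nominal delay is 0.040 s, so
   sync_threshold is also 0.040 s.  If the frame's pts runs 0.060 s ahead
   of the master clock the delay is doubled to 0.080 s (the current picture
   stays on screen for roughly an extra frame period); if it lags by
   0.060 s the delay collapses towards the 10 ms floor so playback can
   catch up. */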

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}
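
/* Worked example of the repeat_pict handling above: with a 1/25 s
   time_base, a frame flagged with repeat_pict == 1 (e.g. MPEG-2
   repeat_first_field) advances the video clock by 0.040 + 0.020 = 0.060 s,
   i.e. it is accounted for as lasting one and a half frame periods. */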

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
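
/* Worked example of the clamping above: for a 1024-sample buffer,
   SAMPLE_CORRECTION_PERCENT_MAX == 10 restricts wanted_size to roughly
   921..1126 samples, so even a large A-V error is worked off at no more
   than about a 10% speed change per callback.  Note that in the "add
   samples" branch nb is computed as samples_size - wanted_size, which is
   negative there, so the duplication loop never actually runs and the
   extended tail of the buffer keeps whatever bytes it already contained. */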
1543

    
1544
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

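            /* the audio clock advances by data_size / (2 * channels * sample_rate)
               seconds per decoded chunk; for example, 4096 bytes of stereo S16 at
               44100 Hz advance it by 4096 / (4 * 44100) ~= 23.2 ms */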
            /* if the packet carried no pts, derive it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts, if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot get precise information on the hardware buffer fullness */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


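/* SDL drives audio output with a pull model: this callback runs on SDL's
   audio thread and must always fill 'len' bytes of 'stream'.  When the
   decoder cannot provide data it outputs silence so playback never stalls;
   otherwise synchronize_audio() may stretch or shrink each decoded block to
   keep the audio clock aligned with the master clock. */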
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

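/* stream_component_open() applies the command-line decoder options
   (lowres, fast, skip_*, error handling, thread count), opens the decoder,
   and then starts the matching consumer: SDL audio output for audio
   streams, or a dedicated video/subtitle thread for the other types. */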
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: request at most two decoded channels */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;
    avcodec_thread_init(enc, thread_count);

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
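        /* the coefficient above makes the running A-V difference decay to 1%
           after AUDIO_DIFF_AVG_NB (20) measurements; the threshold below is
           two SDL callback periods, e.g. 2 * 1024 / 44100 ~= 46 ms for a
           44.1 kHz stream */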
        /* since we do not have a precise enough measure of the audio FIFO
           fullness, we only correct audio sync when the error is larger
           than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

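/* stream_component_close() tears a component down in the reverse order:
   abort the packet queue so any blocked consumer wakes up, signal the
   relevant condition variable, join the worker thread, then release the
   queue and the codec. */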
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this condition to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this condition to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

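/* decode_thread() is the demuxer: it opens the input, selects the audio,
   video and subtitle streams, then loops reading packets and dispatching
   them to the per-stream queues.  It throttles itself while the queues are
   full, services seek requests by flushing the queues and queueing the
   flush_pkt sentinel, and posts FF_QUIT_EVENT back to the main loop if the
   input cannot be opened. */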
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if a start position was requested on the command line, seek to it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding not being done in the correct direction when
//      the seek_pos/seek_rel variables are generated

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

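/* stream_open() allocates the VideoState, creates the picture and subtitle
   queue locks, schedules the first video refresh (40 ms from now) and
   spawns decode_thread(); everything else is driven from that thread and
   from the SDL callbacks. */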
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

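/* switch to the next stream of the given type: close the currently open
   component and open the next stream index with usable parameters.  For
   subtitles the cycle also passes through "no subtitle" (stream_index == -1). */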
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

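/* event_loop() runs on the main thread, which owns all SDL video calls: it
   reacts to keyboard and mouse input and to the FF_ALLOC_EVENT,
   FF_REFRESH_EVENT and FF_QUIT_EVENT messages posted by the other threads. */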
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

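/* command-line option table consumed by parse_options() from cmdutils:
   OPT_BOOL/OPT_INT entries write directly into the referenced globals,
   while OPT_FUNC2 entries dispatch to the two-argument handlers above. */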
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "force low resolution decoding", "level" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "skip loop filtering for selected frames", "mode" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "skip decoding of selected frames", "mode" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "skip IDCT/dequantization for selected frames", "mode" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch-all option", "" },
    { NULL, },
};

static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

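/* Start-up order matters: libavcodec/libavformat/libavdevice registration
   first, option parsing next, SDL initialisation before any audio/video
   output, and finally stream_open() followed by the event loop.  flush_pkt
   is a sentinel packet: after a seek, decode_thread() pushes it into every
   packet queue so the decoders know to call avcodec_flush_buffers(). */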
/* entry point */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}