ffmpeg / ffplay.c @ 59055363
/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
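
/* Illustrative sketch (not part of FFplay itself): how the PacketQueue API
   above is meant to be driven by a demuxer (producer) thread and a decoder
   (consumer) thread. The demo_* names are hypothetical. */
#if 0
static int demo_consumer(void *arg)
{
    PacketQueue *q = arg;
    AVPacket pkt;

    /* block until a packet arrives or packet_queue_abort() is called */
    while (packet_queue_get(q, &pkt, 1) > 0) {
        /* ... decode pkt here ... */
        av_free_packet(&pkt);
    }
    return 0;
}

static void demo_run(AVFormatContext *ic)
{
    PacketQueue q;
    AVPacket pkt;
    SDL_Thread *tid;

    packet_queue_init(&q);
    tid = SDL_CreateThread(demo_consumer, &q);

    /* producer: feed demuxed packets into the queue */
    while (av_read_frame(ic, &pkt) >= 0)
        packet_queue_put(&q, &pkt);

    packet_queue_abort(&q);      /* wake up the blocked consumer */
    SDL_WaitThread(tid, NULL);
    packet_queue_end(&q);        /* flush remaining packets and free them */
}
#endif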

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
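
/* Worked example for the correction above (illustrative numbers only):
   with 44100 Hz stereo S16 audio, bytes_per_sec = 44100 * 2 * 2 = 176400.
   If is->audio_clock is 10.000 s but 8820 bytes are still waiting to be
   played (hw_buf_size), those bytes represent 8820 / 176400 = 0.050 s of
   audio that has not been heard yet, so the clock reported to the caller
   is 10.000 - 0.050 = 9.950 s. */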

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long-term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
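
/* Worked example for the sync logic above (illustrative numbers only):
   with a 25 fps stream the nominal delay is 0.040 s, so sync_threshold =
   FFMAX(AV_SYNC_THRESHOLD, 0.040) = 0.040. If the frame's pts is 0.060 s
   behind the master clock (diff = -0.060 <= -0.040), the delay is forced
   to 0 and the frame is shown immediately, catching the video back up;
   if it is 0.060 s ahead (diff >= 0.040), the delay is doubled to 0.080 s,
   holding the current picture on screen for one extra frame period. */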

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }

        if(   (   decoder_reorder_pts==1
               || decoder_reorder_pts && is->faulty_pts<is->faulty_dts
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or the external clock is the master) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* difference is too big: may be initial PTS errors, so
               reset the A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
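
/* Worked example for the correction above (illustrative numbers only):
   with 44100 Hz stereo S16 audio (n = 4 bytes per sample frame) and a
   buffer of nb_samples = 2048 frames, SAMPLE_CORRECTION_PERCENT_MAX = 10
   limits the corrected size to between 1843 and 2252 frames, i.e. at most
   roughly 10% of the audio is dropped or duplicated per buffer while
   drifting back into sync. */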
1563

    
1564
/* decode one audio frame and return its uncompressed size in bytes */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: the existing code assumes that data_size equals framesize*channels*2,
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* the pts of this frame is the current audio clock; the clock is
               then advanced by the duration of the decoded data */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the packet pts, if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

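/* NOTE: the value below is the part of the last decoded chunk that has not
   yet been handed to SDL; the audio clock estimate (get_audio_clock())
   subtracts it so that the clock reflects what is actually being heard. */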
/* get the amount of decoded audio data, in bytes, that has not yet been fed
   to the audio device. With SDL we cannot get precise hardware buffer
   fullness information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}

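/* NOTE: this callback runs in SDL's audio thread. It must fill exactly 'len'
   bytes of 'stream': it decodes as many frames as needed, passes them through
   synchronize_audio() for A/V sync, and falls back to silence when decoding
   fails or the stream is paused. */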
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

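/* NOTE: stream_component_open() applies the command line codec options to
   the decoder context, opens the decoder, and then starts the matching
   consumer: SDL audio output for audio streams, or a dedicated decoding
   thread for video and subtitle streams. */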
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* limit the number of decoded audio channels to 2 before the decoder is opened */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;
    avcodec_thread_init(enc, thread_count);

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness measure,
           we only correct audio sync if the error is larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

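/* close a stream component: abort its packet queue, wake up and join the
   decoder thread (or close the SDL audio device), then close the codec */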
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this condition to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this condition to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

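/* NOTE: decode_thread() is the demuxing thread: it opens the input, selects
   the audio/video/subtitle streams, then loops reading packets into the
   per-stream queues, handling pause, seek requests and end of file, and
   throttling itself while the queues are full enough. */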
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if a start time was requested, seek to it now */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

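/* NOTE: stream_open() only allocates the VideoState, creates the picture and
   subtitle queue synchronization primitives, schedules the first refresh and
   spawns decode_thread(); the demuxer and codecs are opened in that thread. */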
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

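/* switch to the next stream of the given type; for subtitles, cycling past
   the last candidate disables the subtitle stream altogether */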
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

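/* NOTE: the event loop never returns. Left/right seek by +/-10 seconds and
   up/down by +/-1 minute, while a mouse click seeks to the corresponding
   fraction of the file (by byte position when timestamps are unusable). */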
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size= url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*(double)event.button.x/(double)cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = (double)event.button.x/(double)cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

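/* NOTE: main() registers every codec, demuxer and protocol, parses the
   command line, initializes SDL (audio, video and timer), sets up the flush
   packet used to signal seeks to the decoders, opens the requested stream and
   then hands control to event_loop(), which never returns. */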
/* program entry point */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}