/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
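
/* Typical usage of this queue (see the decode threads further down in this
   file): the demux thread pushes packets with packet_queue_put(), and each
   decoder thread blocks in packet_queue_get(q, &pkt, 1) until a packet, the
   special flush_pkt, or an abort request arrives. */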

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts + (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
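
/* In the sync logic above, when video is slaved to the audio or external
   clock, a frame whose PTS lags the master clock by more than the sync
   threshold gets a zero nominal delay (it is shown as soon as possible),
   while a frame that is early gets its delay doubled, so the display
   gradually locks back onto the master clock. */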

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(pkt->dts != AV_NOPTS_VALUE){
            is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
            is->last_dts_for_fault_detection= pkt->dts;
        }
        if(frame->reordered_opaque != AV_NOPTS_VALUE){
            is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
            is->last_pts_for_fault_detection= frame->reordered_opaque;
        }

        if(   (   decoder_reorder_pts==1
               || decoder_reorder_pts && is->faulty_pts<is->faulty_dts
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
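
/* synchronize_audio() above only stretches or shrinks the audio buffer when
   audio is not the master clock: the running average of the A-V difference
   must exceed audio_diff_threshold before samples are dropped or duplicated,
   and the correction is capped at SAMPLE_CORRECTION_PERCENT_MAX percent of
   the buffer. */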

/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                        break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the pts if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in samples. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1693
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1694
                                              pts);
1695
               is->audio_buf_size = audio_size;
1696
           }
1697
           is->audio_buf_index = 0;
1698
        }
1699
        len1 = is->audio_buf_size - is->audio_buf_index;
1700
        if (len1 > len)
1701
            len1 = len;
1702
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1703
        len -= len1;
1704
        stream += len1;
1705
        is->audio_buf_index += len1;
1706
    }
1707
}
1708

    
1709
/* open a given stream. Return 0 if OK */
1710
static int stream_component_open(VideoState *is, int stream_index)
1711
{
1712
    AVFormatContext *ic = is->ic;
1713
    AVCodecContext *enc;
1714
    AVCodec *codec;
1715
    SDL_AudioSpec wanted_spec, spec;
1716

    
1717
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1718
        return -1;
1719
    enc = ic->streams[stream_index]->codec;
1720

    
1721
    /* prepare audio output */
1722
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1723
        if (enc->channels > 0) {
1724
            enc->request_channels = FFMIN(2, enc->channels);
1725
        } else {
1726
            enc->request_channels = 2;
1727
        }
1728
    }
1729

    
1730
    codec = avcodec_find_decoder(enc->codec_id);
1731
    enc->debug_mv = debug_mv;
1732
    enc->debug = debug;
1733
    enc->workaround_bugs = workaround_bugs;
1734
    enc->lowres = lowres;
1735
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1736
    enc->idct_algo= idct;
1737
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1738
    enc->skip_frame= skip_frame;
1739
    enc->skip_idct= skip_idct;
1740
    enc->skip_loop_filter= skip_loop_filter;
1741
    enc->error_recognition= error_recognition;
1742
    enc->error_concealment= error_concealment;
1743
    avcodec_thread_init(enc, thread_count);
1744

    
1745
    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);
1746

    
1747
    if (!codec ||
1748
        avcodec_open(enc, codec) < 0)
1749
        return -1;
1750

    
1751
    /* prepare audio output */
1752
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1753
        wanted_spec.freq = enc->sample_rate;
1754
        wanted_spec.format = AUDIO_S16SYS;
1755
        wanted_spec.channels = enc->channels;
1756
        wanted_spec.silence = 0;
1757
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1758
        wanted_spec.callback = sdl_audio_callback;
1759
        wanted_spec.userdata = is;
1760
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1761
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1762
            return -1;
1763
        }
1764
        is->audio_hw_buf_size = spec.size;
1765
        is->audio_src_fmt= SAMPLE_FMT_S16;
1766
    }
1767

    
1768
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1769
    switch(enc->codec_type) {
1770
    case CODEC_TYPE_AUDIO:
1771
        is->audio_stream = stream_index;
1772
        is->audio_st = ic->streams[stream_index];
1773
        is->audio_buf_size = 0;
1774
        is->audio_buf_index = 0;
1775

    
1776
        /* init averaging filter */
1777
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1778
        is->audio_diff_avg_count = 0;
1779
        /* since we do not have a precise anough audio fifo fullness,
1780
           we correct audio sync only if larger than this threshold */
1781
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1782

    
1783
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1784
        packet_queue_init(&is->audioq);
1785
        SDL_PauseAudio(0);
1786
        break;
1787
    case CODEC_TYPE_VIDEO:
1788
        is->video_stream = stream_index;
1789
        is->video_st = ic->streams[stream_index];
1790

    
1791
        is->frame_last_delay = 40e-3;
1792
        is->frame_timer = (double)av_gettime() / 1000000.0;
1793
        is->video_current_pts_time = av_gettime();
1794

    
1795
        packet_queue_init(&is->videoq);
1796
        is->video_tid = SDL_CreateThread(video_thread, is);
1797
        break;
1798
    case CODEC_TYPE_SUBTITLE:
1799
        is->subtitle_stream = stream_index;
1800
        is->subtitle_st = ic->streams[stream_index];
1801
        packet_queue_init(&is->subtitleq);
1802

    
1803
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1804
        break;
1805
    default:
1806
        break;
1807
    }
1808
    return 0;
1809
}
1810

    
1811
static void stream_component_close(VideoState *is, int stream_index)
1812
{
1813
    AVFormatContext *ic = is->ic;
1814
    AVCodecContext *enc;
1815

    
1816
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1817
        return;
1818
    enc = ic->streams[stream_index]->codec;
1819

    
1820
    switch(enc->codec_type) {
1821
    case CODEC_TYPE_AUDIO:
1822
        packet_queue_abort(&is->audioq);
1823

    
1824
        SDL_CloseAudio();
1825

    
1826
        packet_queue_end(&is->audioq);
1827
        if (is->reformat_ctx)
1828
            av_audio_convert_free(is->reformat_ctx);
1829
        break;
1830
    case CODEC_TYPE_VIDEO:
1831
        packet_queue_abort(&is->videoq);
1832

    
1833
        /* note: we also signal this mutex to make sure we deblock the
1834
           video thread in all cases */
1835
        SDL_LockMutex(is->pictq_mutex);
1836
        SDL_CondSignal(is->pictq_cond);
1837
        SDL_UnlockMutex(is->pictq_mutex);
1838

    
1839
        SDL_WaitThread(is->video_tid, NULL);
1840

    
1841
        packet_queue_end(&is->videoq);
1842
        break;
1843
    case CODEC_TYPE_SUBTITLE:
1844
        packet_queue_abort(&is->subtitleq);
1845

    
1846
        /* note: we also signal this mutex to make sure we deblock the
1847
           video thread in all cases */
1848
        SDL_LockMutex(is->subpq_mutex);
1849
        is->subtitle_stream_changed = 1;
1850

    
1851
        SDL_CondSignal(is->subpq_cond);
1852
        SDL_UnlockMutex(is->subpq_mutex);
1853

    
1854
        SDL_WaitThread(is->subtitle_tid, NULL);
1855

    
1856
        packet_queue_end(&is->subtitleq);
1857
        break;
1858
    default:
1859
        break;
1860
    }
1861

    
1862
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
1863
    avcodec_close(enc);
1864
    switch(enc->codec_type) {
1865
    case CODEC_TYPE_AUDIO:
1866
        is->audio_st = NULL;
1867
        is->audio_stream = -1;
1868
        break;
1869
    case CODEC_TYPE_VIDEO:
1870
        is->video_st = NULL;
1871
        is->video_stream = -1;
1872
        break;
1873
    case CODEC_TYPE_SUBTITLE:
1874
        is->subtitle_st = NULL;
1875
        is->subtitle_stream = -1;
1876
        break;
1877
    default:
1878
        break;
1879
    }
1880
}
1881

    
1882
/* since we have only one decoding thread, we can use a global
1883
   variable instead of a thread local variable */
1884
static VideoState *global_video_state;
1885

    
1886
static int decode_interrupt_cb(void)
1887
{
1888
    return (global_video_state && global_video_state->abort_request);
1889
}
1890

    
1891
/* this thread gets the stream from the disk or the network */
1892
static int decode_thread(void *arg)
1893
{
1894
    VideoState *is = arg;
1895
    AVFormatContext *ic;
1896
    int err, i, ret, video_index, audio_index, subtitle_index;
1897
    AVPacket pkt1, *pkt = &pkt1;
1898
    AVFormatParameters params, *ap = &params;
1899
    int eof=0;
1900

    
1901
    ic = avformat_alloc_context();
1902

    
1903
    video_index = -1;
1904
    audio_index = -1;
1905
    subtitle_index = -1;
1906
    is->video_stream = -1;
1907
    is->audio_stream = -1;
1908
    is->subtitle_stream = -1;
1909

    
1910
    global_video_state = is;
1911
    url_set_interrupt_cb(decode_interrupt_cb);
1912

    
1913
    memset(ap, 0, sizeof(*ap));
1914

    
1915
    ap->prealloced_context = 1;
1916
    ap->width = frame_width;
1917
    ap->height= frame_height;
1918
    ap->time_base= (AVRational){1, 25};
1919
    ap->pix_fmt = frame_pix_fmt;
1920

    
1921
    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
1922

    
1923
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1924
    if (err < 0) {
1925
        print_error(is->filename, err);
1926
        ret = -1;
1927
        goto fail;
1928
    }
1929
    is->ic = ic;
1930

    
1931
    if(genpts)
1932
        ic->flags |= AVFMT_FLAG_GENPTS;
1933

    
1934
    err = av_find_stream_info(ic);
1935
    if (err < 0) {
1936
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1937
        ret = -1;
1938
        goto fail;
1939
    }
1940
    if(ic->pb)
1941
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1942

    
1943
    /* if seeking requested, we execute it */
1944
    if (start_time != AV_NOPTS_VALUE) {
1945
        int64_t timestamp;
1946

    
1947
        timestamp = start_time;
1948
        /* add the stream start time */
1949
        if (ic->start_time != AV_NOPTS_VALUE)
1950
            timestamp += ic->start_time;
1951
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
1952
        if (ret < 0) {
1953
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
1954
                    is->filename, (double)timestamp / AV_TIME_BASE);
1955
        }
1956
    }
1957

    
1958
    for(i = 0; i < ic->nb_streams; i++) {
1959
        AVCodecContext *enc = ic->streams[i]->codec;
1960
        ic->streams[i]->discard = AVDISCARD_ALL;
1961
        switch(enc->codec_type) {
1962
        case CODEC_TYPE_AUDIO:
1963
            if (wanted_audio_stream-- >= 0 && !audio_disable)
1964
                audio_index = i;
1965
            break;
1966
        case CODEC_TYPE_VIDEO:
1967
            if (wanted_video_stream-- >= 0 && !video_disable)
1968
                video_index = i;
1969
            break;
1970
        case CODEC_TYPE_SUBTITLE:
1971
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
1972
                subtitle_index = i;
1973
            break;
1974
        default:
1975
            break;
1976
        }
1977
    }
1978
    if (show_status) {
1979
        dump_format(ic, 0, is->filename, 0);
1980
    }
1981

    
1982
    /* open the streams */
1983
    if (audio_index >= 0) {
1984
        stream_component_open(is, audio_index);
1985
    }
1986

    
1987
    if (video_index >= 0) {
1988
        stream_component_open(is, video_index);
1989
    } else {
1990
        if (!display_disable)
1991
            is->show_audio = 1;
1992
    }
1993

    
1994
    if (subtitle_index >= 0) {
1995
        stream_component_open(is, subtitle_index);
1996
    }
1997

    
1998
    if (is->video_stream < 0 && is->audio_stream < 0) {
1999
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2000
        ret = -1;
2001
        goto fail;
2002
    }
2003

    
2004
    for(;;) {
2005
        if (is->abort_request)
2006
            break;
2007
        if (is->paused != is->last_paused) {
2008
            is->last_paused = is->paused;
2009
            if (is->paused)
2010
                av_read_pause(ic);
2011
            else
2012
                av_read_play(ic);
2013
        }
2014
#if CONFIG_RTSP_DEMUXER
2015
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2016
            /* wait 10 ms to avoid trying to get another packet */
2017
            /* XXX: horrible */
2018
            SDL_Delay(10);
2019
            continue;
2020
        }
2021
#endif
2022
        if (is->seek_req) {
2023
            int64_t seek_target= is->seek_pos;
2024
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2025
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2026
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
2027
//      of the seek_pos/seek_rel variables
2028

    
2029
            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2030
            if (ret < 0) {
2031
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2032
            }else{
2033
                if (is->audio_stream >= 0) {
2034
                    packet_queue_flush(&is->audioq);
2035
                    packet_queue_put(&is->audioq, &flush_pkt);
2036
                }
2037
                if (is->subtitle_stream >= 0) {
2038
                    packet_queue_flush(&is->subtitleq);
2039
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2040
                }
2041
                if (is->video_stream >= 0) {
2042
                    packet_queue_flush(&is->videoq);
2043
                    packet_queue_put(&is->videoq, &flush_pkt);
2044
                }
2045
            }
2046
            is->seek_req = 0;
2047
            eof= 0;
2048
        }
2049

    
2050
        /* if the queue are full, no need to read more */
2051
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2052
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2053
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2054
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2055
            /* wait 10 ms */
2056
            SDL_Delay(10);
2057
            continue;
2058
        }
2059
        if(url_feof(ic->pb) || eof) {
2060
            if(is->video_stream >= 0){
2061
                av_init_packet(pkt);
2062
                pkt->data=NULL;
2063
                pkt->size=0;
2064
                pkt->stream_index= is->video_stream;
2065
                packet_queue_put(&is->videoq, pkt);
2066
            }
2067
            SDL_Delay(10);
2068
            continue;
2069
        }
2070
        ret = av_read_frame(ic, pkt);
2071
        if (ret < 0) {
2072
            if (ret == AVERROR_EOF)
2073
                eof=1;
2074
            if (url_ferror(ic->pb))
2075
                break;
2076
            SDL_Delay(100); /* wait for user event */
2077
            continue;
2078
        }
2079
        if (pkt->stream_index == is->audio_stream) {
2080
            packet_queue_put(&is->audioq, pkt);
2081
        } else if (pkt->stream_index == is->video_stream) {
2082
            packet_queue_put(&is->videoq, pkt);
2083
        } else if (pkt->stream_index == is->subtitle_stream) {
2084
            packet_queue_put(&is->subtitleq, pkt);
2085
        } else {
2086
            av_free_packet(pkt);
2087
        }
2088
    }
2089
    /* wait until the end */
2090
    while (!is->abort_request) {
2091
        SDL_Delay(100);
2092
    }
2093

    
2094
    ret = 0;
2095
 fail:
2096
    /* disable interrupting */
2097
    global_video_state = NULL;
2098

    
2099
    /* close each stream */
2100
    if (is->audio_stream >= 0)
2101
        stream_component_close(is, is->audio_stream);
2102
    if (is->video_stream >= 0)
2103
        stream_component_close(is, is->video_stream);
2104
    if (is->subtitle_stream >= 0)
2105
        stream_component_close(is, is->subtitle_stream);
2106
    if (is->ic) {
2107
        av_close_input_file(is->ic);
2108
        is->ic = NULL; /* safety */
2109
    }
2110
    url_set_interrupt_cb(NULL);
2111

    
2112
    if (ret != 0) {
2113
        SDL_Event event;
2114

    
2115
        event.type = FF_QUIT_EVENT;
2116
        event.user.data1 = is;
2117
        SDL_PushEvent(&event);
2118
    }
2119
    return 0;
2120
}
2121

    
2122
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2123
{
2124
    VideoState *is;
2125

    
2126
    is = av_mallocz(sizeof(VideoState));
2127
    if (!is)
2128
        return NULL;
2129
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2130
    is->iformat = iformat;
2131
    is->ytop = 0;
2132
    is->xleft = 0;
2133

    
2134
    /* start video display */
2135
    is->pictq_mutex = SDL_CreateMutex();
2136
    is->pictq_cond = SDL_CreateCond();
2137

    
2138
    is->subpq_mutex = SDL_CreateMutex();
2139
    is->subpq_cond = SDL_CreateCond();
2140

    
2141
    /* add the refresh timer to draw the picture */
2142
    schedule_refresh(is, 40);
2143

    
2144
    is->av_sync_type = av_sync_type;
2145
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2146
    if (!is->parse_tid) {
2147
        av_free(is);
2148
        return NULL;
2149
    }
2150
    return is;
2151
}
2152

    
2153
static void stream_close(VideoState *is)
2154
{
2155
    VideoPicture *vp;
2156
    int i;
2157
    /* XXX: use a special url_shutdown call to abort parse cleanly */
2158
    is->abort_request = 1;
2159
    SDL_WaitThread(is->parse_tid, NULL);
2160

    
2161
    /* free all pictures */
2162
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2163
        vp = &is->pictq[i];
2164
        if (vp->bmp) {
2165
            SDL_FreeYUVOverlay(vp->bmp);
2166
            vp->bmp = NULL;
2167
        }
2168
    }
2169
    SDL_DestroyMutex(is->pictq_mutex);
2170
    SDL_DestroyCond(is->pictq_cond);
2171
    SDL_DestroyMutex(is->subpq_mutex);
2172
    SDL_DestroyCond(is->subpq_cond);
2173
    if (is->img_convert_ctx)
2174
        sws_freeContext(is->img_convert_ctx);
2175
    av_free(is);
2176
}
2177

    
2178
static void stream_cycle_channel(VideoState *is, int codec_type)
2179
{
2180
    AVFormatContext *ic = is->ic;
2181
    int start_index, stream_index;
2182
    AVStream *st;
2183

    
2184
    if (codec_type == CODEC_TYPE_VIDEO)
2185
        start_index = is->video_stream;
2186
    else if (codec_type == CODEC_TYPE_AUDIO)
2187
        start_index = is->audio_stream;
2188
    else
2189
        start_index = is->subtitle_stream;
2190
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2191
        return;
2192
    stream_index = start_index;
2193
    for(;;) {
2194
        if (++stream_index >= is->ic->nb_streams)
2195
        {
2196
            if (codec_type == CODEC_TYPE_SUBTITLE)
2197
            {
2198
                stream_index = -1;
2199
                goto the_end;
2200
            } else
2201
                stream_index = 0;
2202
        }
2203
        if (stream_index == start_index)
2204
            return;
2205
        st = ic->streams[stream_index];
2206
        if (st->codec->codec_type == codec_type) {
2207
            /* check that parameters are OK */
2208
            switch(codec_type) {
2209
            case CODEC_TYPE_AUDIO:
2210
                if (st->codec->sample_rate != 0 &&
2211
                    st->codec->channels != 0)
2212
                    goto the_end;
2213
                break;
2214
            case CODEC_TYPE_VIDEO:
2215
            case CODEC_TYPE_SUBTITLE:
2216
                goto the_end;
2217
            default:
2218
                break;
2219
            }
2220
        }
2221
    }
2222
 the_end:
2223
    stream_component_close(is, start_index);
2224
    stream_component_open(is, stream_index);
2225
}
2226

    
2227

    
2228
static void toggle_full_screen(void)
2229
{
2230
    is_full_screen = !is_full_screen;
2231
    if (!fs_screen_width) {
2232
        /* use default SDL method */
2233
//        SDL_WM_ToggleFullScreen(screen);
2234
    }
2235
    video_open(cur_stream);
2236
}
2237

    
2238
static void toggle_pause(void)
2239
{
2240
    if (cur_stream)
2241
        stream_pause(cur_stream);
2242
    step = 0;
2243
}
2244

    
2245
static void step_to_next_frame(void)
2246
{
2247
    if (cur_stream) {
2248
        /* if the stream is paused unpause it, then step */
2249
        if (cur_stream->paused)
2250
            stream_pause(cur_stream);
2251
    }
2252
    step = 1;
2253
}
2254

    
2255
static void do_exit(void)
2256
{
2257
    int i;
2258
    if (cur_stream) {
2259
        stream_close(cur_stream);
2260
        cur_stream = NULL;
2261
    }
2262
    for (i = 0; i < CODEC_TYPE_NB; i++)
2263
        av_free(avcodec_opts[i]);
2264
    av_free(avformat_opts);
2265
    av_free(sws_opts);
2266
    if (show_status)
2267
        printf("\n");
2268
    SDL_Quit();
2269
    exit(0);
2270
}
2271

    
2272
static void toggle_audio_display(void)
2273
{
2274
    if (cur_stream) {
2275
        cur_stream->show_audio = !cur_stream->show_audio;
2276
    }
2277
}
2278

    
2279
/* handle an event sent by the GUI */
2280
static void event_loop(void)
2281
{
2282
    SDL_Event event;
2283
    double incr, pos, frac;
2284

    
2285
    for(;;) {
2286
        SDL_WaitEvent(&event);
2287
        switch(event.type) {
2288
        case SDL_KEYDOWN:
2289
            switch(event.key.keysym.sym) {
2290
            case SDLK_ESCAPE:
2291
            case SDLK_q:
2292
                do_exit();
2293
                break;
2294
            case SDLK_f:
2295
                toggle_full_screen();
2296
                break;
2297
            case SDLK_p:
2298
            case SDLK_SPACE:
2299
                toggle_pause();
2300
                break;
2301
            case SDLK_s: //S: Step to next frame
2302
                step_to_next_frame();
2303
                break;
2304
            case SDLK_a:
2305
                if (cur_stream)
2306
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2307
                break;
2308
            case SDLK_v:
2309
                if (cur_stream)
2310
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2311
                break;
2312
            case SDLK_t:
2313
                if (cur_stream)
2314
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2315
                break;
2316
            case SDLK_w:
2317
                toggle_audio_display();
2318
                break;
2319
            case SDLK_LEFT:
2320
                incr = -10.0;
2321
                goto do_seek;
2322
            case SDLK_RIGHT:
2323
                incr = 10.0;
2324
                goto do_seek;
2325
            case SDLK_UP:
2326
                incr = 60.0;
2327
                goto do_seek;
2328
            case SDLK_DOWN:
2329
                incr = -60.0;
2330
            do_seek:
2331
                if (cur_stream) {
2332
                    if (seek_by_bytes) {
2333
                        pos = url_ftell(cur_stream->ic->pb);
2334
                        if (cur_stream->ic->bit_rate)
2335
                            incr *= cur_stream->ic->bit_rate / 60.0;
2336
                        else
2337
                            incr *= 180000.0;
2338
                        pos += incr;
2339
                        stream_seek(cur_stream, pos, incr, 1);
2340
                    } else {
2341
                        pos = get_master_clock(cur_stream);
2342
                        pos += incr;
2343
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2344
                    }
2345
                }
2346
                break;
2347
            default:
2348
                break;
2349
            }
2350
            break;
2351
        case SDL_MOUSEBUTTONDOWN:
2352
            if (cur_stream) {
2353
                if(seek_by_bytes || cur_stream->ic->duration<=0){
2354
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
2355
                    stream_seek(cur_stream, size*(double)event.button.x/(double)cur_stream->width, 0, 1);
2356
                }else{
2357
                    int64_t ts;
2358
                    int ns, hh, mm, ss;
2359
                    int tns, thh, tmm, tss;
2360
                    tns = cur_stream->ic->duration/1000000LL;
2361
                    thh = tns/3600;
2362
                    tmm = (tns%3600)/60;
2363
                    tss = (tns%60);
2364
                    frac = (double)event.button.x/(double)cur_stream->width;
2365
                    ns = frac*tns;
2366
                    hh = ns/3600;
2367
                    mm = (ns%3600)/60;
2368
                    ss = (ns%60);
2369
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2370
                            hh, mm, ss, thh, tmm, tss);
2371
                    ts = frac*cur_stream->ic->duration;
2372
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2373
                        ts += cur_stream->ic->start_time;
2374
                    stream_seek(cur_stream, ts, 0, 0);
2375
                }
2376
            }
2377
            break;
2378
        case SDL_VIDEORESIZE:
2379
            if (cur_stream) {
2380
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2381
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2382
                screen_width = cur_stream->width = event.resize.w;
2383
                screen_height= cur_stream->height= event.resize.h;
2384
            }
2385
            break;
2386
        case SDL_QUIT:
2387
        case FF_QUIT_EVENT:
2388
            do_exit();
2389
            break;
2390
        case FF_ALLOC_EVENT:
2391
            video_open(event.user.data1);
2392
            alloc_picture(event.user.data1);
2393
            break;
2394
        case FF_REFRESH_EVENT:
2395
            video_refresh_timer(event.user.data1);
2396
            break;
2397
        default:
2398
            break;
2399
        }
2400
    }
2401
}
2402

    
2403
static void opt_frame_size(const char *arg)
2404
{
2405
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2406
        fprintf(stderr, "Incorrect frame size\n");
2407
        exit(1);
2408
    }
2409
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2410
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2411
        exit(1);
2412
    }
2413
}
2414

    
2415
static int opt_width(const char *opt, const char *arg)
2416
{
2417
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2418
    return 0;
2419
}
2420

    
2421
static int opt_height(const char *opt, const char *arg)
2422
{
2423
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2424
    return 0;
2425
}
2426

    
2427
static void opt_format(const char *arg)
2428
{
2429
    file_iformat = av_find_input_format(arg);
2430
    if (!file_iformat) {
2431
        fprintf(stderr, "Unknown input format: %s\n", arg);
2432
        exit(1);
2433
    }
2434
}
2435

    
2436
static void opt_frame_pix_fmt(const char *arg)
2437
{
2438
    frame_pix_fmt = av_get_pix_fmt(arg);
2439
}
2440

    
2441
static int opt_sync(const char *opt, const char *arg)
2442
{
2443
    if (!strcmp(arg, "audio"))
2444
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2445
    else if (!strcmp(arg, "video"))
2446
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2447
    else if (!strcmp(arg, "ext"))
2448
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2449
    else {
2450
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2451
        exit(1);
2452
    }
2453
    return 0;
2454
}
2455

    
2456
static int opt_seek(const char *opt, const char *arg)
2457
{
2458
    start_time = parse_time_or_die(opt, arg, 1);
2459
    return 0;
2460
}
2461

    
2462
static int opt_debug(const char *opt, const char *arg)
2463
{
2464
    av_log_set_level(99);
2465
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2466
    return 0;
2467
}
2468

    
2469
static int opt_vismv(const char *opt, const char *arg)
2470
{
2471
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2472
    return 0;
2473
}
2474

    
2475
static int opt_thread_count(const char *opt, const char *arg)
2476
{
2477
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2478
#if !HAVE_THREADS
2479
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2480
#endif
2481
    return 0;
2482
}
2483

    
2484
static const OptionDef options[] = {
2485
#include "cmdutils_common_opts.h"
2486
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2487
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2488
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2489
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2490
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2491
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2492
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
2493
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
2494
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
2495
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2496
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2497
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2498
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2499
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2500
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2501
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2502
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2503
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2504
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2505
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2506
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2507
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2508
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2509
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2510
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2511
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2512
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2513
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2514
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2515
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2516
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2517
    { NULL, },
2518
};
2519

    
2520
static void show_usage(void)
2521
{
2522
    printf("Simple media player\n");
2523
    printf("usage: ffplay [options] input_file\n");
2524
    printf("\n");
2525
}
2526

    
2527
static void show_help(void)
2528
{
2529
    show_usage();
2530
    show_help_options(options, "Main options:\n",
2531
                      OPT_EXPERT, 0);
2532
    show_help_options(options, "\nAdvanced options:\n",
2533
                      OPT_EXPERT, OPT_EXPERT);
2534
    printf("\nWhile playing:\n"
2535
           "q, ESC              quit\n"
2536
           "f                   toggle full screen\n"
2537
           "p, SPC              pause\n"
2538
           "a                   cycle audio channel\n"
2539
           "v                   cycle video channel\n"
2540
           "t                   cycle subtitle channel\n"
2541
           "w                   show audio waves\n"
2542
           "left/right          seek backward/forward 10 seconds\n"
2543
           "down/up             seek backward/forward 1 minute\n"
2544
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2545
           );
2546
}
2547

    
2548
static void opt_input_file(const char *filename)
2549
{
2550
    if (!strcmp(filename, "-"))
2551
        filename = "pipe:";
2552
    input_filename = filename;
2553
}
2554

    
2555
/* Called from the main */
2556
int main(int argc, char **argv)
2557
{
2558
    int flags, i;
2559

    
2560
    /* register all codecs, demux and protocols */
2561
    avcodec_register_all();
2562
    avdevice_register_all();
2563
    av_register_all();
2564

    
2565
    for(i=0; i<CODEC_TYPE_NB; i++){
2566
        avcodec_opts[i]= avcodec_alloc_context2(i);
2567
    }
2568
    avformat_opts = avformat_alloc_context();
2569
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
2570

    
2571
    show_banner();
2572

    
2573
    parse_options(argc, argv, options, opt_input_file);
2574

    
2575
    if (!input_filename) {
2576
        show_usage();
2577
        fprintf(stderr, "An input file must be specified\n");
2578
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
2579
        exit(1);
2580
    }
2581

    
2582
    if (display_disable) {
2583
        video_disable = 1;
2584
    }
2585
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2586
#if !defined(__MINGW32__) && !defined(__APPLE__)
2587
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2588
#endif
2589
    if (SDL_Init (flags)) {
2590
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2591
        exit(1);
2592
    }
2593

    
2594
    if (!display_disable) {
2595
#if HAVE_SDL_VIDEO_SIZE
2596
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2597
        fs_screen_width = vi->current_w;
2598
        fs_screen_height = vi->current_h;
2599
#endif
2600
    }
2601

    
2602
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2603
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2604
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2605
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2606

    
2607
    av_init_packet(&flush_pkt);
2608
    flush_pkt.data= "FLUSH";
2609

    
2610
    cur_stream = stream_open(input_filename, file_iformat);
2611

    
2612
    event_loop();
2613

    
2614
    /* never returns */
2615

    
2616
    return 0;
2617
}