Statistics
| Branch: | Revision:

ffmpeg / ffplay.c @ 79ee4683

History | View | Annotate | Download (78.4 KB)

1
/*
2
 * FFplay : Simple Media Player based on the ffmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "config.h"
23
#include <math.h>
24
#include <limits.h>
25
#include "libavutil/avstring.h"
26
#include "libavutil/pixdesc.h"
27
#include "libavformat/avformat.h"
28
#include "libavdevice/avdevice.h"
29
#include "libswscale/swscale.h"
30
#include "libavcodec/audioconvert.h"
31
#include "libavcodec/colorspace.h"
32
#include "libavcodec/opt.h"
33

    
34
#include "cmdutils.h"
35

    
36
#include <SDL.h>
37
#include <SDL_thread.h>
38

    
39
#ifdef __MINGW32__
40
#undef main /* We don't want SDL to override our main() */
41
#endif
42

    
43
#undef exit
44
#undef printf
45
#undef fprintf
46

    
47
const char program_name[] = "FFplay";
48
const int program_birth_year = 2003;
49

    
50
//#define DEBUG_SYNC
51

    
52
#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
53
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
54
#define MIN_FRAMES 5
55

    
56
/* SDL audio buffer size, in samples. Should be small to have precise
57
   A/V sync as SDL does not have hardware buffer fullness info. */
58
#define SDL_AUDIO_BUFFER_SIZE 1024
59

    
60
/* no AV sync correction is done if below the AV sync threshold */
61
#define AV_SYNC_THRESHOLD 0.01
62
/* no AV correction is done if too big error */
63
#define AV_NOSYNC_THRESHOLD 10.0
64

    
65
/* maximum audio speed change to get correct sync */
66
#define SAMPLE_CORRECTION_PERCENT_MAX 10
67

    
68
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
69
#define AUDIO_DIFF_AVG_NB   20
70

    
71
/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
72
#define SAMPLE_ARRAY_SIZE (2*65536)
73

    
74
static int sws_flags = SWS_BICUBIC;
75

    
76
typedef struct PacketQueue {
77
    AVPacketList *first_pkt, *last_pkt;
78
    int nb_packets;
79
    int size;
80
    int abort_request;
81
    SDL_mutex *mutex;
82
    SDL_cond *cond;
83
} PacketQueue;
84

    
85
#define VIDEO_PICTURE_QUEUE_SIZE 1
86
#define SUBPICTURE_QUEUE_SIZE 4
87

    
88
typedef struct VideoPicture {
89
    double pts;                                  ///<presentation time stamp for this picture
90
    SDL_Overlay *bmp;
91
    int width, height; /* source height & width */
92
    int allocated;
93
} VideoPicture;
94

    
95
typedef struct SubPicture {
96
    double pts; /* presentation time stamp for this picture */
97
    AVSubtitle sub;
98
} SubPicture;
99

    
100
enum {
101
    AV_SYNC_AUDIO_MASTER, /* default choice */
102
    AV_SYNC_VIDEO_MASTER,
103
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
104
};
105

    
106
typedef struct VideoState {
107
    SDL_Thread *parse_tid;
108
    SDL_Thread *video_tid;
109
    AVInputFormat *iformat;
110
    int no_background;
111
    int abort_request;
112
    int paused;
113
    int last_paused;
114
    int seek_req;
115
    int seek_flags;
116
    int64_t seek_pos;
117
    int64_t seek_rel;
118
    AVFormatContext *ic;
119
    int dtg_active_format;
120

    
121
    int audio_stream;
122

    
123
    int av_sync_type;
124
    double external_clock; /* external clock base */
125
    int64_t external_clock_time;
126

    
127
    double audio_clock;
128
    double audio_diff_cum; /* used for AV difference average computation */
129
    double audio_diff_avg_coef;
130
    double audio_diff_threshold;
131
    int audio_diff_avg_count;
132
    AVStream *audio_st;
133
    PacketQueue audioq;
134
    int audio_hw_buf_size;
135
    /* samples output by the codec. we reserve more space for avsync
136
       compensation */
137
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
138
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
139
    uint8_t *audio_buf;
140
    unsigned int audio_buf_size; /* in bytes */
141
    int audio_buf_index; /* in bytes */
142
    AVPacket audio_pkt_temp;
143
    AVPacket audio_pkt;
144
    enum SampleFormat audio_src_fmt;
145
    AVAudioConvert *reformat_ctx;
146

    
147
    int show_audio; /* if true, display audio samples */
148
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
149
    int sample_array_index;
150
    int last_i_start;
151

    
152
    SDL_Thread *subtitle_tid;
153
    int subtitle_stream;
154
    int subtitle_stream_changed;
155
    AVStream *subtitle_st;
156
    PacketQueue subtitleq;
157
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
158
    int subpq_size, subpq_rindex, subpq_windex;
159
    SDL_mutex *subpq_mutex;
160
    SDL_cond *subpq_cond;
161

    
162
    double frame_timer;
163
    double frame_last_pts;
164
    double frame_last_delay;
165
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
166
    int video_stream;
167
    AVStream *video_st;
168
    PacketQueue videoq;
169
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
170
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
171
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
172
    int pictq_size, pictq_rindex, pictq_windex;
173
    SDL_mutex *pictq_mutex;
174
    SDL_cond *pictq_cond;
175
    struct SwsContext *img_convert_ctx;
176

    
177
    //    QETimer *video_timer;
178
    char filename[1024];
179
    int width, height, xleft, ytop;
180
} VideoState;
181

    
182
static void show_help(void);
183
static int audio_write_get_buf_size(VideoState *is);
184

    
185
/* options specified by the user */
186
static AVInputFormat *file_iformat;
187
static const char *input_filename;
188
static int fs_screen_width;
189
static int fs_screen_height;
190
static int screen_width = 0;
191
static int screen_height = 0;
192
static int frame_width = 0;
193
static int frame_height = 0;
194
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
195
static int audio_disable;
196
static int video_disable;
197
static int wanted_audio_stream= 0;
198
static int wanted_video_stream= 0;
199
static int wanted_subtitle_stream= -1;
200
static int seek_by_bytes;
201
static int display_disable;
202
static int show_status = 1;
203
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
204
static int64_t start_time = AV_NOPTS_VALUE;
205
static int debug = 0;
206
static int debug_mv = 0;
207
static int step = 0;
208
static int thread_count = 1;
209
static int workaround_bugs = 1;
210
static int fast = 0;
211
static int genpts = 0;
212
static int lowres = 0;
213
static int idct = FF_IDCT_AUTO;
214
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
215
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
216
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
217
static int error_recognition = FF_ER_CAREFUL;
218
static int error_concealment = 3;
219
static int decoder_reorder_pts= 0;
220

    
221
/* current context */
222
static int is_full_screen;
223
static VideoState *cur_stream;
224
static int64_t audio_callback_time;
225

    
226
static AVPacket flush_pkt;
227

    
228
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
229
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
230
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
231

    
232
static SDL_Surface *screen;
233

    
234
/* packet queue handling */
235
static void packet_queue_init(PacketQueue *q)
236
{
237
    memset(q, 0, sizeof(PacketQueue));
238
    q->mutex = SDL_CreateMutex();
239
    q->cond = SDL_CreateCond();
240
}
241

    
242
static void packet_queue_flush(PacketQueue *q)
243
{
244
    AVPacketList *pkt, *pkt1;
245

    
246
    SDL_LockMutex(q->mutex);
247
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
248
        pkt1 = pkt->next;
249
        av_free_packet(&pkt->pkt);
250
        av_freep(&pkt);
251
    }
252
    q->last_pkt = NULL;
253
    q->first_pkt = NULL;
254
    q->nb_packets = 0;
255
    q->size = 0;
256
    SDL_UnlockMutex(q->mutex);
257
}
258

    
259
static void packet_queue_end(PacketQueue *q)
260
{
261
    packet_queue_flush(q);
262
    SDL_DestroyMutex(q->mutex);
263
    SDL_DestroyCond(q->cond);
264
}
265

    
266
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
267
{
268
    AVPacketList *pkt1;
269

    
270
    /* duplicate the packet */
271
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
272
        return -1;
273

    
274
    pkt1 = av_malloc(sizeof(AVPacketList));
275
    if (!pkt1)
276
        return -1;
277
    pkt1->pkt = *pkt;
278
    pkt1->next = NULL;
279

    
280

    
281
    SDL_LockMutex(q->mutex);
282

    
283
    if (!q->last_pkt)
284

    
285
        q->first_pkt = pkt1;
286
    else
287
        q->last_pkt->next = pkt1;
288
    q->last_pkt = pkt1;
289
    q->nb_packets++;
290
    q->size += pkt1->pkt.size + sizeof(*pkt1);
291
    /* XXX: should duplicate packet data in DV case */
292
    SDL_CondSignal(q->cond);
293

    
294
    SDL_UnlockMutex(q->mutex);
295
    return 0;
296
}
297

    
298
static void packet_queue_abort(PacketQueue *q)
299
{
300
    SDL_LockMutex(q->mutex);
301

    
302
    q->abort_request = 1;
303

    
304
    SDL_CondSignal(q->cond);
305

    
306
    SDL_UnlockMutex(q->mutex);
307
}
308

    
309
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
310
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
311
{
312
    AVPacketList *pkt1;
313
    int ret;
314

    
315
    SDL_LockMutex(q->mutex);
316

    
317
    for(;;) {
318
        if (q->abort_request) {
319
            ret = -1;
320
            break;
321
        }
322

    
323
        pkt1 = q->first_pkt;
324
        if (pkt1) {
325
            q->first_pkt = pkt1->next;
326
            if (!q->first_pkt)
327
                q->last_pkt = NULL;
328
            q->nb_packets--;
329
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
330
            *pkt = pkt1->pkt;
331
            av_free(pkt1);
332
            ret = 1;
333
            break;
334
        } else if (!block) {
335
            ret = 0;
336
            break;
337
        } else {
338
            SDL_CondWait(q->cond, q->mutex);
339
        }
340
    }
341
    SDL_UnlockMutex(q->mutex);
342
    return ret;
343
}
344

    
345
static inline void fill_rectangle(SDL_Surface *screen,
346
                                  int x, int y, int w, int h, int color)
347
{
348
    SDL_Rect rect;
349
    rect.x = x;
350
    rect.y = y;
351
    rect.w = w;
352
    rect.h = h;
353
    SDL_FillRect(screen, &rect, color);
354
}
355

    
356
#if 0
357
/* draw only the border of a rectangle */
358
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
359
{
360
    int w1, w2, h1, h2;
361

362
    /* fill the background */
363
    w1 = x;
364
    if (w1 < 0)
365
        w1 = 0;
366
    w2 = s->width - (x + w);
367
    if (w2 < 0)
368
        w2 = 0;
369
    h1 = y;
370
    if (h1 < 0)
371
        h1 = 0;
372
    h2 = s->height - (y + h);
373
    if (h2 < 0)
374
        h2 = 0;
375
    fill_rectangle(screen,
376
                   s->xleft, s->ytop,
377
                   w1, s->height,
378
                   color);
379
    fill_rectangle(screen,
380
                   s->xleft + s->width - w2, s->ytop,
381
                   w2, s->height,
382
                   color);
383
    fill_rectangle(screen,
384
                   s->xleft + w1, s->ytop,
385
                   s->width - w1 - w2, h1,
386
                   color);
387
    fill_rectangle(screen,
388
                   s->xleft + w1, s->ytop + s->height - h2,
389
                   s->width - w1 - w2, h2,
390
                   color);
391
}
392
#endif
393

    
394
#define ALPHA_BLEND(a, oldp, newp, s)\
395
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
396

    
397
#define RGBA_IN(r, g, b, a, s)\
398
{\
399
    unsigned int v = ((const uint32_t *)(s))[0];\
400
    a = (v >> 24) & 0xff;\
401
    r = (v >> 16) & 0xff;\
402
    g = (v >> 8) & 0xff;\
403
    b = v & 0xff;\
404
}
405

    
406
#define YUVA_IN(y, u, v, a, s, pal)\
407
{\
408
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
409
    a = (val >> 24) & 0xff;\
410
    y = (val >> 16) & 0xff;\
411
    u = (val >> 8) & 0xff;\
412
    v = val & 0xff;\
413
}
414

    
415
#define YUVA_OUT(d, y, u, v, a)\
416
{\
417
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
418
}
419

    
420

    
421
#define BPP 1
422

    
423
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
424
{
425
    int wrap, wrap3, width2, skip2;
426
    int y, u, v, a, u1, v1, a1, w, h;
427
    uint8_t *lum, *cb, *cr;
428
    const uint8_t *p;
429
    const uint32_t *pal;
430
    int dstx, dsty, dstw, dsth;
431

    
432
    dstw = av_clip(rect->w, 0, imgw);
433
    dsth = av_clip(rect->h, 0, imgh);
434
    dstx = av_clip(rect->x, 0, imgw - dstw);
435
    dsty = av_clip(rect->y, 0, imgh - dsth);
436
    lum = dst->data[0] + dsty * dst->linesize[0];
437
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
438
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
439

    
440
    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
441
    skip2 = dstx >> 1;
442
    wrap = dst->linesize[0];
443
    wrap3 = rect->pict.linesize[0];
444
    p = rect->pict.data[0];
445
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
446

    
447
    if (dsty & 1) {
448
        lum += dstx;
449
        cb += skip2;
450
        cr += skip2;
451

    
452
        if (dstx & 1) {
453
            YUVA_IN(y, u, v, a, p, pal);
454
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
455
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
456
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
457
            cb++;
458
            cr++;
459
            lum++;
460
            p += BPP;
461
        }
462
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
463
            YUVA_IN(y, u, v, a, p, pal);
464
            u1 = u;
465
            v1 = v;
466
            a1 = a;
467
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
468

    
469
            YUVA_IN(y, u, v, a, p + BPP, pal);
470
            u1 += u;
471
            v1 += v;
472
            a1 += a;
473
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
474
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
475
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
476
            cb++;
477
            cr++;
478
            p += 2 * BPP;
479
            lum += 2;
480
        }
481
        if (w) {
482
            YUVA_IN(y, u, v, a, p, pal);
483
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
484
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
485
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
486
            p++;
487
            lum++;
488
        }
489
        p += wrap3 - dstw * BPP;
490
        lum += wrap - dstw - dstx;
491
        cb += dst->linesize[1] - width2 - skip2;
492
        cr += dst->linesize[2] - width2 - skip2;
493
    }
494
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
495
        lum += dstx;
496
        cb += skip2;
497
        cr += skip2;
498

    
499
        if (dstx & 1) {
500
            YUVA_IN(y, u, v, a, p, pal);
501
            u1 = u;
502
            v1 = v;
503
            a1 = a;
504
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
505
            p += wrap3;
506
            lum += wrap;
507
            YUVA_IN(y, u, v, a, p, pal);
508
            u1 += u;
509
            v1 += v;
510
            a1 += a;
511
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
513
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
514
            cb++;
515
            cr++;
516
            p += -wrap3 + BPP;
517
            lum += -wrap + 1;
518
        }
519
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520
            YUVA_IN(y, u, v, a, p, pal);
521
            u1 = u;
522
            v1 = v;
523
            a1 = a;
524
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525

    
526
            YUVA_IN(y, u, v, a, p + BPP, pal);
527
            u1 += u;
528
            v1 += v;
529
            a1 += a;
530
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531
            p += wrap3;
532
            lum += wrap;
533

    
534
            YUVA_IN(y, u, v, a, p, pal);
535
            u1 += u;
536
            v1 += v;
537
            a1 += a;
538
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539

    
540
            YUVA_IN(y, u, v, a, p + BPP, pal);
541
            u1 += u;
542
            v1 += v;
543
            a1 += a;
544
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545

    
546
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
547
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
548

    
549
            cb++;
550
            cr++;
551
            p += -wrap3 + 2 * BPP;
552
            lum += -wrap + 2;
553
        }
554
        if (w) {
555
            YUVA_IN(y, u, v, a, p, pal);
556
            u1 = u;
557
            v1 = v;
558
            a1 = a;
559
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560
            p += wrap3;
561
            lum += wrap;
562
            YUVA_IN(y, u, v, a, p, pal);
563
            u1 += u;
564
            v1 += v;
565
            a1 += a;
566
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569
            cb++;
570
            cr++;
571
            p += -wrap3 + BPP;
572
            lum += -wrap + 1;
573
        }
574
        p += wrap3 + (wrap3 - dstw * BPP);
575
        lum += wrap + (wrap - dstw - dstx);
576
        cb += dst->linesize[1] - width2 - skip2;
577
        cr += dst->linesize[2] - width2 - skip2;
578
    }
579
    /* handle odd height */
580
    if (h) {
581
        lum += dstx;
582
        cb += skip2;
583
        cr += skip2;
584

    
585
        if (dstx & 1) {
586
            YUVA_IN(y, u, v, a, p, pal);
587
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
588
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
589
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
590
            cb++;
591
            cr++;
592
            lum++;
593
            p += BPP;
594
        }
595
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
596
            YUVA_IN(y, u, v, a, p, pal);
597
            u1 = u;
598
            v1 = v;
599
            a1 = a;
600
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601

    
602
            YUVA_IN(y, u, v, a, p + BPP, pal);
603
            u1 += u;
604
            v1 += v;
605
            a1 += a;
606
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
607
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
608
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
609
            cb++;
610
            cr++;
611
            p += 2 * BPP;
612
            lum += 2;
613
        }
614
        if (w) {
615
            YUVA_IN(y, u, v, a, p, pal);
616
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
618
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
619
        }
620
    }
621
}
622

    
623
static void free_subpicture(SubPicture *sp)
624
{
625
    int i;
626

    
627
    for (i = 0; i < sp->sub.num_rects; i++)
628
    {
629
        av_freep(&sp->sub.rects[i]->pict.data[0]);
630
        av_freep(&sp->sub.rects[i]->pict.data[1]);
631
        av_freep(&sp->sub.rects[i]);
632
    }
633

    
634
    av_free(sp->sub.rects);
635

    
636
    memset(&sp->sub, 0, sizeof(AVSubtitle));
637
}
638

    
639
static void video_image_display(VideoState *is)
640
{
641
    VideoPicture *vp;
642
    SubPicture *sp;
643
    AVPicture pict;
644
    float aspect_ratio;
645
    int width, height, x, y;
646
    SDL_Rect rect;
647
    int i;
648

    
649
    vp = &is->pictq[is->pictq_rindex];
650
    if (vp->bmp) {
651
        /* XXX: use variable in the frame */
652
        if (is->video_st->sample_aspect_ratio.num)
653
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
654
        else if (is->video_st->codec->sample_aspect_ratio.num)
655
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
656
        else
657
            aspect_ratio = 0;
658
        if (aspect_ratio <= 0.0)
659
            aspect_ratio = 1.0;
660
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
661
        /* if an active format is indicated, then it overrides the
662
           mpeg format */
663
#if 0
664
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
665
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
666
            printf("dtg_active_format=%d\n", is->dtg_active_format);
667
        }
668
#endif
669
#if 0
670
        switch(is->video_st->codec->dtg_active_format) {
671
        case FF_DTG_AFD_SAME:
672
        default:
673
            /* nothing to do */
674
            break;
675
        case FF_DTG_AFD_4_3:
676
            aspect_ratio = 4.0 / 3.0;
677
            break;
678
        case FF_DTG_AFD_16_9:
679
            aspect_ratio = 16.0 / 9.0;
680
            break;
681
        case FF_DTG_AFD_14_9:
682
            aspect_ratio = 14.0 / 9.0;
683
            break;
684
        case FF_DTG_AFD_4_3_SP_14_9:
685
            aspect_ratio = 14.0 / 9.0;
686
            break;
687
        case FF_DTG_AFD_16_9_SP_14_9:
688
            aspect_ratio = 14.0 / 9.0;
689
            break;
690
        case FF_DTG_AFD_SP_4_3:
691
            aspect_ratio = 4.0 / 3.0;
692
            break;
693
        }
694
#endif
695

    
696
        if (is->subtitle_st)
697
        {
698
            if (is->subpq_size > 0)
699
            {
700
                sp = &is->subpq[is->subpq_rindex];
701

    
702
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
703
                {
704
                    SDL_LockYUVOverlay (vp->bmp);
705

    
706
                    pict.data[0] = vp->bmp->pixels[0];
707
                    pict.data[1] = vp->bmp->pixels[2];
708
                    pict.data[2] = vp->bmp->pixels[1];
709

    
710
                    pict.linesize[0] = vp->bmp->pitches[0];
711
                    pict.linesize[1] = vp->bmp->pitches[2];
712
                    pict.linesize[2] = vp->bmp->pitches[1];
713

    
714
                    for (i = 0; i < sp->sub.num_rects; i++)
715
                        blend_subrect(&pict, sp->sub.rects[i],
716
                                      vp->bmp->w, vp->bmp->h);
717

    
718
                    SDL_UnlockYUVOverlay (vp->bmp);
719
                }
720
            }
721
        }
722

    
723

    
724
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
725
        height = is->height;
726
        width = ((int)rint(height * aspect_ratio)) & ~1;
727
        if (width > is->width) {
728
            width = is->width;
729
            height = ((int)rint(width / aspect_ratio)) & ~1;
730
        }
731
        x = (is->width - width) / 2;
732
        y = (is->height - height) / 2;
733
        if (!is->no_background) {
734
            /* fill the background */
735
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
736
        } else {
737
            is->no_background = 0;
738
        }
739
        rect.x = is->xleft + x;
740
        rect.y = is->ytop  + y;
741
        rect.w = width;
742
        rect.h = height;
743
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
744
    } else {
745
#if 0
746
        fill_rectangle(screen,
747
                       is->xleft, is->ytop, is->width, is->height,
748
                       QERGB(0x00, 0x00, 0x00));
749
#endif
750
    }
751
}
752

    
753
static inline int compute_mod(int a, int b)
754
{
755
    a = a % b;
756
    if (a >= 0)
757
        return a;
758
    else
759
        return a + b;
760
}
761

    
762
static void video_audio_display(VideoState *s)
763
{
764
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
765
    int ch, channels, h, h2, bgcolor, fgcolor;
766
    int16_t time_diff;
767

    
768
    /* compute display index : center on currently output samples */
769
    channels = s->audio_st->codec->channels;
770
    nb_display_channels = channels;
771
    if (!s->paused) {
772
        n = 2 * channels;
773
        delay = audio_write_get_buf_size(s);
774
        delay /= n;
775

    
776
        /* to be more precise, we take into account the time spent since
777
           the last buffer computation */
778
        if (audio_callback_time) {
779
            time_diff = av_gettime() - audio_callback_time;
780
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
781
        }
782

    
783
        delay -= s->width / 2;
784
        if (delay < s->width)
785
            delay = s->width;
786

    
787
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
788

    
789
        h= INT_MIN;
790
        for(i=0; i<1000; i+=channels){
791
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
792
            int a= s->sample_array[idx];
793
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
794
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
795
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
796
            int score= a-d;
797
            if(h<score && (b^c)<0){
798
                h= score;
799
                i_start= idx;
800
            }
801
        }
802

    
803
        s->last_i_start = i_start;
804
    } else {
805
        i_start = s->last_i_start;
806
    }
807

    
808
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
809
    fill_rectangle(screen,
810
                   s->xleft, s->ytop, s->width, s->height,
811
                   bgcolor);
812

    
813
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
814

    
815
    /* total height for one channel */
816
    h = s->height / nb_display_channels;
817
    /* graph height / 2 */
818
    h2 = (h * 9) / 20;
819
    for(ch = 0;ch < nb_display_channels; ch++) {
820
        i = i_start + ch;
821
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
822
        for(x = 0; x < s->width; x++) {
823
            y = (s->sample_array[i] * h2) >> 15;
824
            if (y < 0) {
825
                y = -y;
826
                ys = y1 - y;
827
            } else {
828
                ys = y1;
829
            }
830
            fill_rectangle(screen,
831
                           s->xleft + x, ys, 1, y,
832
                           fgcolor);
833
            i += channels;
834
            if (i >= SAMPLE_ARRAY_SIZE)
835
                i -= SAMPLE_ARRAY_SIZE;
836
        }
837
    }
838

    
839
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
840

    
841
    for(ch = 1;ch < nb_display_channels; ch++) {
842
        y = s->ytop + ch * h;
843
        fill_rectangle(screen,
844
                       s->xleft, y, s->width, 1,
845
                       fgcolor);
846
    }
847
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
848
}
849

    
850
static int video_open(VideoState *is){
851
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
852
    int w,h;
853

    
854
    if(is_full_screen) flags |= SDL_FULLSCREEN;
855
    else               flags |= SDL_RESIZABLE;
856

    
857
    if (is_full_screen && fs_screen_width) {
858
        w = fs_screen_width;
859
        h = fs_screen_height;
860
    } else if(!is_full_screen && screen_width){
861
        w = screen_width;
862
        h = screen_height;
863
    }else if (is->video_st && is->video_st->codec->width){
864
        w = is->video_st->codec->width;
865
        h = is->video_st->codec->height;
866
    } else {
867
        w = 640;
868
        h = 480;
869
    }
870
#ifndef __APPLE__
871
    screen = SDL_SetVideoMode(w, h, 0, flags);
872
#else
873
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
874
    screen = SDL_SetVideoMode(w, h, 24, flags);
875
#endif
876
    if (!screen) {
877
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
878
        return -1;
879
    }
880
    SDL_WM_SetCaption("FFplay", "FFplay");
881

    
882
    is->width = screen->w;
883
    is->height = screen->h;
884

    
885
    return 0;
886
}
887

    
888
/* display the current picture, if any */
889
static void video_display(VideoState *is)
890
{
891
    if(!screen)
892
        video_open(cur_stream);
893
    if (is->audio_st && is->show_audio)
894
        video_audio_display(is);
895
    else if (is->video_st)
896
        video_image_display(is);
897
}
898

    
899
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
900
{
901
    SDL_Event event;
902
    event.type = FF_REFRESH_EVENT;
903
    event.user.data1 = opaque;
904
    SDL_PushEvent(&event);
905
    return 0; /* 0 means stop timer */
906
}
907

    
908
/* schedule a video refresh in 'delay' ms */
909
static void schedule_refresh(VideoState *is, int delay)
910
{
911
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
912
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
913
}
914

    
915
/* get the current audio clock value */
916
static double get_audio_clock(VideoState *is)
917
{
918
    double pts;
919
    int hw_buf_size, bytes_per_sec;
920
    pts = is->audio_clock;
921
    hw_buf_size = audio_write_get_buf_size(is);
922
    bytes_per_sec = 0;
923
    if (is->audio_st) {
924
        bytes_per_sec = is->audio_st->codec->sample_rate *
925
            2 * is->audio_st->codec->channels;
926
    }
927
    if (bytes_per_sec)
928
        pts -= (double)hw_buf_size / bytes_per_sec;
929
    return pts;
930
}
931

    
932
/* get the current video clock value */
933
static double get_video_clock(VideoState *is)
934
{
935
    double delta;
936
    if (is->paused) {
937
        delta = 0;
938
    } else {
939
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
940
    }
941
    return is->video_current_pts + delta;
942
}
943

    
944
/* get the current external clock value */
945
static double get_external_clock(VideoState *is)
946
{
947
    int64_t ti;
948
    ti = av_gettime();
949
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
950
}
951

    
952
/* get the current master clock value */
953
static double get_master_clock(VideoState *is)
954
{
955
    double val;
956

    
957
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
958
        if (is->video_st)
959
            val = get_video_clock(is);
960
        else
961
            val = get_audio_clock(is);
962
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
963
        if (is->audio_st)
964
            val = get_audio_clock(is);
965
        else
966
            val = get_video_clock(is);
967
    } else {
968
        val = get_external_clock(is);
969
    }
970
    return val;
971
}
972

    
973
/* seek in the stream */
974
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
975
{
976
    if (!is->seek_req) {
977
        is->seek_pos = pos;
978
        is->seek_rel = rel;
979
        if (seek_by_bytes)
980
            is->seek_flags |= AVSEEK_FLAG_BYTE;
981
        is->seek_req = 1;
982
    }
983
}
984

    
985
/* pause or resume the video */
986
static void stream_pause(VideoState *is)
987
{
988
    is->paused = !is->paused;
989
    if (!is->paused) {
990
        is->video_current_pts = get_video_clock(is);
991
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
992
    }
993
}
994

    
995
static double compute_frame_delay(double frame_current_pts, VideoState *is)
996
{
997
    double actual_delay, delay, sync_threshold, ref_clock, diff;
998

    
999
    /* compute nominal delay */
1000
    delay = frame_current_pts - is->frame_last_pts;
1001
    if (delay <= 0 || delay >= 10.0) {
1002
        /* if incorrect delay, use previous one */
1003
        delay = is->frame_last_delay;
1004
    } else {
1005
        is->frame_last_delay = delay;
1006
    }
1007
    is->frame_last_pts = frame_current_pts;
1008

    
1009
    /* update delay to follow master synchronisation source */
1010
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1011
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1012
        /* if video is slave, we try to correct big delays by
1013
           duplicating or deleting a frame */
1014
        ref_clock = get_master_clock(is);
1015
        diff = frame_current_pts - ref_clock;
1016

    
1017
        /* skip or repeat frame. We take into account the
1018
           delay to compute the threshold. I still don't know
1019
           if it is the best guess */
1020
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1021
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1022
            if (diff <= -sync_threshold)
1023
                delay = 0;
1024
            else if (diff >= sync_threshold)
1025
                delay = 2 * delay;
1026
        }
1027
    }
1028

    
1029
    is->frame_timer += delay;
1030
    /* compute the REAL delay (we need to do that to avoid
1031
       long term errors */
1032
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1033
    if (actual_delay < 0.010) {
1034
        /* XXX: should skip picture */
1035
        actual_delay = 0.010;
1036
    }
1037

    
1038
#if defined(DEBUG_SYNC)
1039
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1040
            delay, actual_delay, frame_current_pts, -diff);
1041
#endif
1042

    
1043
    return actual_delay;
1044
}
1045

    
1046
/* called to display each frame */
1047
static void video_refresh_timer(void *opaque)
1048
{
1049
    VideoState *is = opaque;
1050
    VideoPicture *vp;
1051

    
1052
    SubPicture *sp, *sp2;
1053

    
1054
    if (is->video_st) {
1055
        if (is->pictq_size == 0) {
1056
            /* if no picture, need to wait */
1057
            schedule_refresh(is, 1);
1058
        } else {
1059
            /* dequeue the picture */
1060
            vp = &is->pictq[is->pictq_rindex];
1061

    
1062
            /* update current video pts */
1063
            is->video_current_pts = vp->pts;
1064
            is->video_current_pts_time = av_gettime();
1065

    
1066
            /* launch timer for next picture */
1067
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
1068

    
1069
            if(is->subtitle_st) {
1070
                if (is->subtitle_stream_changed) {
1071
                    SDL_LockMutex(is->subpq_mutex);
1072

    
1073
                    while (is->subpq_size) {
1074
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1075

    
1076
                        /* update queue size and signal for next picture */
1077
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1078
                            is->subpq_rindex = 0;
1079

    
1080
                        is->subpq_size--;
1081
                    }
1082
                    is->subtitle_stream_changed = 0;
1083

    
1084
                    SDL_CondSignal(is->subpq_cond);
1085
                    SDL_UnlockMutex(is->subpq_mutex);
1086
                } else {
1087
                    if (is->subpq_size > 0) {
1088
                        sp = &is->subpq[is->subpq_rindex];
1089

    
1090
                        if (is->subpq_size > 1)
1091
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1092
                        else
1093
                            sp2 = NULL;
1094

    
1095
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1096
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1097
                        {
1098
                            free_subpicture(sp);
1099

    
1100
                            /* update queue size and signal for next picture */
1101
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1102
                                is->subpq_rindex = 0;
1103

    
1104
                            SDL_LockMutex(is->subpq_mutex);
1105
                            is->subpq_size--;
1106
                            SDL_CondSignal(is->subpq_cond);
1107
                            SDL_UnlockMutex(is->subpq_mutex);
1108
                        }
1109
                    }
1110
                }
1111
            }
1112

    
1113
            /* display picture */
1114
            video_display(is);
1115

    
1116
            /* update queue size and signal for next picture */
1117
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1118
                is->pictq_rindex = 0;
1119

    
1120
            SDL_LockMutex(is->pictq_mutex);
1121
            is->pictq_size--;
1122
            SDL_CondSignal(is->pictq_cond);
1123
            SDL_UnlockMutex(is->pictq_mutex);
1124
        }
1125
    } else if (is->audio_st) {
1126
        /* draw the next audio frame */
1127

    
1128
        schedule_refresh(is, 40);
1129

    
1130
        /* if only audio stream, then display the audio bars (better
1131
           than nothing, just to test the implementation */
1132

    
1133
        /* display picture */
1134
        video_display(is);
1135
    } else {
1136
        schedule_refresh(is, 100);
1137
    }
1138
    if (show_status) {
1139
        static int64_t last_time;
1140
        int64_t cur_time;
1141
        int aqsize, vqsize, sqsize;
1142
        double av_diff;
1143

    
1144
        cur_time = av_gettime();
1145
        if (!last_time || (cur_time - last_time) >= 30000) {
1146
            aqsize = 0;
1147
            vqsize = 0;
1148
            sqsize = 0;
1149
            if (is->audio_st)
1150
                aqsize = is->audioq.size;
1151
            if (is->video_st)
1152
                vqsize = is->videoq.size;
1153
            if (is->subtitle_st)
1154
                sqsize = is->subtitleq.size;
1155
            av_diff = 0;
1156
            if (is->audio_st && is->video_st)
1157
                av_diff = get_audio_clock(is) - get_video_clock(is);
1158
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1159
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1160
            fflush(stdout);
1161
            last_time = cur_time;
1162
        }
1163
    }
1164
}
1165

    
1166
/* allocate a picture (needs to do that in main thread to avoid
1167
   potential locking problems */
1168
static void alloc_picture(void *opaque)
1169
{
1170
    VideoState *is = opaque;
1171
    VideoPicture *vp;
1172

    
1173
    vp = &is->pictq[is->pictq_windex];
1174

    
1175
    if (vp->bmp)
1176
        SDL_FreeYUVOverlay(vp->bmp);
1177

    
1178
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1179
                                   is->video_st->codec->height,
1180
                                   SDL_YV12_OVERLAY,
1181
                                   screen);
1182
    vp->width = is->video_st->codec->width;
1183
    vp->height = is->video_st->codec->height;
1184

    
1185
    SDL_LockMutex(is->pictq_mutex);
1186
    vp->allocated = 1;
1187
    SDL_CondSignal(is->pictq_cond);
1188
    SDL_UnlockMutex(is->pictq_mutex);
1189
}
1190

    
1191
/**
1192
 *
1193
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1194
 */
1195
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1196
{
1197
    VideoPicture *vp;
1198
    int dst_pix_fmt;
1199

    
1200
    /* wait until we have space to put a new picture */
1201
    SDL_LockMutex(is->pictq_mutex);
1202
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1203
           !is->videoq.abort_request) {
1204
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1205
    }
1206
    SDL_UnlockMutex(is->pictq_mutex);
1207

    
1208
    if (is->videoq.abort_request)
1209
        return -1;
1210

    
1211
    vp = &is->pictq[is->pictq_windex];
1212

    
1213
    /* alloc or resize hardware picture buffer */
1214
    if (!vp->bmp ||
1215
        vp->width != is->video_st->codec->width ||
1216
        vp->height != is->video_st->codec->height) {
1217
        SDL_Event event;
1218

    
1219
        vp->allocated = 0;
1220

    
1221
        /* the allocation must be done in the main thread to avoid
1222
           locking problems */
1223
        event.type = FF_ALLOC_EVENT;
1224
        event.user.data1 = is;
1225
        SDL_PushEvent(&event);
1226

    
1227
        /* wait until the picture is allocated */
1228
        SDL_LockMutex(is->pictq_mutex);
1229
        while (!vp->allocated && !is->videoq.abort_request) {
1230
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1231
        }
1232
        SDL_UnlockMutex(is->pictq_mutex);
1233

    
1234
        if (is->videoq.abort_request)
1235
            return -1;
1236
    }
1237

    
1238
    /* if the frame is not skipped, then display it */
1239
    if (vp->bmp) {
1240
        AVPicture pict;
1241

    
1242
        /* get a pointer on the bitmap */
1243
        SDL_LockYUVOverlay (vp->bmp);
1244

    
1245
        dst_pix_fmt = PIX_FMT_YUV420P;
1246
        memset(&pict,0,sizeof(AVPicture));
1247
        pict.data[0] = vp->bmp->pixels[0];
1248
        pict.data[1] = vp->bmp->pixels[2];
1249
        pict.data[2] = vp->bmp->pixels[1];
1250

    
1251
        pict.linesize[0] = vp->bmp->pitches[0];
1252
        pict.linesize[1] = vp->bmp->pitches[2];
1253
        pict.linesize[2] = vp->bmp->pitches[1];
1254
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1255
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1256
            is->video_st->codec->width, is->video_st->codec->height,
1257
            is->video_st->codec->pix_fmt,
1258
            is->video_st->codec->width, is->video_st->codec->height,
1259
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1260
        if (is->img_convert_ctx == NULL) {
1261
            fprintf(stderr, "Cannot initialize the conversion context\n");
1262
            exit(1);
1263
        }
1264
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1265
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1266
        /* update the bitmap content */
1267
        SDL_UnlockYUVOverlay(vp->bmp);
1268

    
1269
        vp->pts = pts;
1270

    
1271
        /* now we can update the picture count */
1272
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1273
            is->pictq_windex = 0;
1274
        SDL_LockMutex(is->pictq_mutex);
1275
        is->pictq_size++;
1276
        SDL_UnlockMutex(is->pictq_mutex);
1277
    }
1278
    return 0;
1279
}
1280

    
1281
/**
1282
 * compute the exact PTS for the picture if it is omitted in the stream
1283
 * @param pts1 the dts of the pkt / pts of the frame
1284
 */
1285
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1286
{
1287
    double frame_delay, pts;
1288

    
1289
    pts = pts1;
1290

    
1291
    if (pts != 0) {
1292
        /* update video clock with pts, if present */
1293
        is->video_clock = pts;
1294
    } else {
1295
        pts = is->video_clock;
1296
    }
1297
    /* update video clock for next frame */
1298
    frame_delay = av_q2d(is->video_st->codec->time_base);
1299
    /* for MPEG2, the frame can be repeated, so we update the
1300
       clock accordingly */
1301
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1302
    is->video_clock += frame_delay;
1303

    
1304
#if defined(DEBUG_SYNC) && 0
1305
    {
1306
        int ftype;
1307
        if (src_frame->pict_type == FF_B_TYPE)
1308
            ftype = 'B';
1309
        else if (src_frame->pict_type == FF_I_TYPE)
1310
            ftype = 'I';
1311
        else
1312
            ftype = 'P';
1313
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1314
               ftype, pts, pts1);
1315
    }
1316
#endif
1317
    return queue_picture(is, src_frame, pts);
1318
}
1319

    
1320
static int video_thread(void *arg)
1321
{
1322
    VideoState *is = arg;
1323
    AVPacket pkt1, *pkt = &pkt1;
1324
    int len1, got_picture;
1325
    AVFrame *frame= avcodec_alloc_frame();
1326
    double pts;
1327

    
1328
    for(;;) {
1329
        while (is->paused && !is->videoq.abort_request) {
1330
            SDL_Delay(10);
1331
        }
1332
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1333
            break;
1334

    
1335
        if(pkt->data == flush_pkt.data){
1336
            avcodec_flush_buffers(is->video_st->codec);
1337
            continue;
1338
        }
1339

    
1340
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1341
           this packet, if any */
1342
        is->video_st->codec->reordered_opaque= pkt->pts;
1343
        len1 = avcodec_decode_video2(is->video_st->codec,
1344
                                    frame, &got_picture,
1345
                                    pkt);
1346

    
1347
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1348
           && frame->reordered_opaque != AV_NOPTS_VALUE)
1349
            pts= frame->reordered_opaque;
1350
        else if(pkt->dts != AV_NOPTS_VALUE)
1351
            pts= pkt->dts;
1352
        else
1353
            pts= 0;
1354
        pts *= av_q2d(is->video_st->time_base);
1355

    
1356
//            if (len1 < 0)
1357
//                break;
1358
        if (got_picture) {
1359
            if (output_picture2(is, frame, pts) < 0)
1360
                goto the_end;
1361
        }
1362
        av_free_packet(pkt);
1363
        if (step)
1364
            if (cur_stream)
1365
                stream_pause(cur_stream);
1366
    }
1367
 the_end:
1368
    av_free(frame);
1369
    return 0;
1370
}
1371

    
1372
static int subtitle_thread(void *arg)
1373
{
1374
    VideoState *is = arg;
1375
    SubPicture *sp;
1376
    AVPacket pkt1, *pkt = &pkt1;
1377
    int len1, got_subtitle;
1378
    double pts;
1379
    int i, j;
1380
    int r, g, b, y, u, v, a;
1381

    
1382
    for(;;) {
1383
        while (is->paused && !is->subtitleq.abort_request) {
1384
            SDL_Delay(10);
1385
        }
1386
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1387
            break;
1388

    
1389
        if(pkt->data == flush_pkt.data){
1390
            avcodec_flush_buffers(is->subtitle_st->codec);
1391
            continue;
1392
        }
1393
        SDL_LockMutex(is->subpq_mutex);
1394
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1395
               !is->subtitleq.abort_request) {
1396
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1397
        }
1398
        SDL_UnlockMutex(is->subpq_mutex);
1399

    
1400
        if (is->subtitleq.abort_request)
1401
            goto the_end;
1402

    
1403
        sp = &is->subpq[is->subpq_windex];
1404

    
1405
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1406
           this packet, if any */
1407
        pts = 0;
1408
        if (pkt->pts != AV_NOPTS_VALUE)
1409
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1410

    
1411
        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1412
                                    &sp->sub, &got_subtitle,
1413
                                    pkt);
1414
//            if (len1 < 0)
1415
//                break;
1416
        if (got_subtitle && sp->sub.format == 0) {
1417
            sp->pts = pts;
1418

    
1419
            for (i = 0; i < sp->sub.num_rects; i++)
1420
            {
1421
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1422
                {
1423
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1424
                    y = RGB_TO_Y_CCIR(r, g, b);
1425
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1426
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1427
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1428
                }
1429
            }
1430

    
1431
            /* now we can update the picture count */
1432
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1433
                is->subpq_windex = 0;
1434
            SDL_LockMutex(is->subpq_mutex);
1435
            is->subpq_size++;
1436
            SDL_UnlockMutex(is->subpq_mutex);
1437
        }
1438
        av_free_packet(pkt);
1439
//        if (step)
1440
//            if (cur_stream)
1441
//                stream_pause(cur_stream);
1442
    }
1443
 the_end:
1444
    return 0;
1445
}
1446

    
1447
/* copy samples for viewing in editor window */
1448
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1449
{
1450
    int size, len, channels;
1451

    
1452
    channels = is->audio_st->codec->channels;
1453

    
1454
    size = samples_size / sizeof(short);
1455
    while (size > 0) {
1456
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1457
        if (len > size)
1458
            len = size;
1459
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1460
        samples += len;
1461
        is->sample_array_index += len;
1462
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1463
            is->sample_array_index = 0;
1464
        size -= len;
1465
    }
1466
}
1467

    
1468
/* return the new audio buffer size (samples can be added or deleted
1469
   to get better sync if video or external master clock) */
1470
static int synchronize_audio(VideoState *is, short *samples,
1471
                             int samples_size1, double pts)
1472
{
1473
    int n, samples_size;
1474
    double ref_clock;
1475

    
1476
    n = 2 * is->audio_st->codec->channels;
1477
    samples_size = samples_size1;
1478

    
1479
    /* if not master, then we try to remove or add samples to correct the clock */
1480
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1481
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1482
        double diff, avg_diff;
1483
        int wanted_size, min_size, max_size, nb_samples;
1484

    
1485
        ref_clock = get_master_clock(is);
1486
        diff = get_audio_clock(is) - ref_clock;
1487

    
1488
        if (diff < AV_NOSYNC_THRESHOLD) {
1489
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1490
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1491
                /* not enough measures to have a correct estimate */
1492
                is->audio_diff_avg_count++;
1493
            } else {
1494
                /* estimate the A-V difference */
1495
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1496

    
1497
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1498
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1499
                    nb_samples = samples_size / n;
1500

    
1501
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1502
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1503
                    if (wanted_size < min_size)
1504
                        wanted_size = min_size;
1505
                    else if (wanted_size > max_size)
1506
                        wanted_size = max_size;
1507

    
1508
                    /* add or remove samples to correction the synchro */
1509
                    if (wanted_size < samples_size) {
1510
                        /* remove samples */
1511
                        samples_size = wanted_size;
1512
                    } else if (wanted_size > samples_size) {
1513
                        uint8_t *samples_end, *q;
1514
                        int nb;
1515

    
1516
                        /* add samples */
1517
                        nb = (samples_size - wanted_size);
1518
                        samples_end = (uint8_t *)samples + samples_size - n;
1519
                        q = samples_end + n;
1520
                        while (nb > 0) {
1521
                            memcpy(q, samples_end, n);
1522
                            q += n;
1523
                            nb -= n;
1524
                        }
1525
                        samples_size = wanted_size;
1526
                    }
1527
                }
1528
#if 0
1529
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1530
                       diff, avg_diff, samples_size - samples_size1,
1531
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1532
#endif
1533
            }
1534
        } else {
1535
            /* too big difference : may be initial PTS errors, so
1536
               reset A-V filter */
1537
            is->audio_diff_avg_count = 0;
1538
            is->audio_diff_cum = 0;
1539
        }
1540
    }
1541

    
1542
    return samples_size;
1543
}

/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
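            /* illustrative figures: for 16-bit stereo at 44100 Hz, n = 4
               bytes per sample frame, so a 4096 byte chunk advances the
               clock by 4096 / (4 * 44100) ~= 23 ms */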
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}
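
/* The value above is the number of decoded bytes not yet handed to SDL.
   The audio clock reader subtracts it from is->audio_clock, roughly

       pts = is->audio_clock
           - (double)audio_write_get_buf_size(is) / bytes_per_sec;

   with bytes_per_sec = sample_rate * 2 * channels; this is only a sketch of
   the get_audio_clock() logic defined earlier in this file, shown here to
   explain why the buffer index is tracked so carefully. */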


/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output: request a downmix to at most 2 channels */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;
    avcodec_thread_init(enc, thread_count);

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
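        /* the averaging in synchronize_audio() is an exponential moving
           average: the running sum audio_diff_cum is scaled by this
           coefficient before each new difference is added, so the weight of
           an old sample decays to about 1% after AUDIO_DIFF_AVG_NB updates
           (coef^AUDIO_DIFF_AVG_NB == 0.01); the average itself is read back
           as audio_diff_cum * (1.0 - audio_diff_avg_coef) in that function. */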
        is->audio_diff_avg_count = 0;
        /* since we do not have precise enough audio FIFO fullness
           information, we only correct audio sync if the error is larger
           than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
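        /* e.g. with SDL_AUDIO_BUFFER_SIZE = 1024 samples at 44100 Hz this
           amounts to about 2 * 1024 / 44100 ~= 46 ms of audio */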

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking was requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }
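
        /* after a successful seek (above) the per-stream packet queues are
           flushed and a flush_pkt sentinel is queued; when a decoder loop
           pops it, it calls avcodec_flush_buffers(), as audio_decode_frame()
           does, so no stale pre-seek data is rendered */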

        /* if the queues are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
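
        /* the empty packet queued above at end of file lets decoders that
           buffer frames internally (CODEC_CAP_DELAY) drain, so the last
           pictures are still displayed */
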
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int64_t ts;
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                ts = frac*cur_stream->ic->duration;
                if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                    ts += cur_stream->ic->start_time;
                stream_seek(cur_stream, ts, 0);
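                /* illustrative example: clicking at 25% of the window width
                   in a 2 hour file gives frac = 0.25, i.e. a seek target of
                   30 minutes (plus the stream start time when present) */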
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch-all option", "" },
    { NULL, },
};
2492

    
2493
static void show_usage(void)
2494
{
2495
    printf("Simple media player\n");
2496
    printf("usage: ffplay [options] input_file\n");
2497
    printf("\n");
2498
}
2499

    
2500
static void show_help(void)
2501
{
2502
    show_usage();
2503
    show_help_options(options, "Main options:\n",
2504
                      OPT_EXPERT, 0);
2505
    show_help_options(options, "\nAdvanced options:\n",
2506
                      OPT_EXPERT, OPT_EXPERT);
2507
    printf("\nWhile playing:\n"
2508
           "q, ESC              quit\n"
2509
           "f                   toggle full screen\n"
2510
           "p, SPC              pause\n"
2511
           "a                   cycle audio channel\n"
2512
           "v                   cycle video channel\n"
2513
           "t                   cycle subtitle channel\n"
2514
           "w                   show audio waves\n"
2515
           "left/right          seek backward/forward 10 seconds\n"
2516
           "down/up             seek backward/forward 1 minute\n"
2517
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2518
           );
2519
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
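
    /* the 16x16 context above is never used for actual scaling; it only
       provides an options context so that swscale options given on the
       command line (via opt_default) have somewhere to be applied, and it
       is freed again in do_exit() */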

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";
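
    /* flush_pkt is a sentinel: the decoder loops compare packet data
       pointers against flush_pkt.data (cf. audio_decode_frame()) and call
       avcodec_flush_buffers() on a match, so the string contents are never
       actually read */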

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}