/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "avformat.h"
#include "swscale.h"
#include "avstring.h"

#include "version.h"
#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

static const char program_name[] = "FFplay";
static const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif


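/* CCIR 601 RGB->YUV conversion in 10-bit fixed point: FIX(x) scales a
   floating point coefficient by 2^SCALEBITS (e.g. FIX(0.5) == 512), ONE_HALF
   provides rounding, and the final >> SCALEBITS converts the accumulated
   products back to an 8-bit integer value. */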
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

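/* Alpha-blend one palettized subtitle rectangle onto a YUV 4:2:0 picture.
   The palette has already been converted from RGBA to YUVA (see
   subtitle_thread), and since chroma is subsampled 2x2 the loops handle luma
   two pixels and two rows at a time, averaging the chroma contributions
   before blending. */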
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstx = FFMIN(FFMAX(rect->x, 0), imgw);
    dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
    dsty = FFMIN(FFMAX(rect->y, 0), imgh);
    dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = (dstw + 1) >> 1;
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->linesize;
    p = rect->bitmap;
    pal = rect->rgba_palette;  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_free(sp->sub.rects[i].bitmap);
        av_free(sp->sub.rects[i].rgba_palette);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

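/* mathematical modulus: unlike the C '%' operator the result is always
   non-negative, e.g. compute_mod(-3, 10) == 7; used to wrap indices into
   sample_array. */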
static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

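/* draw an oscilloscope-like view of the most recent audio samples: estimate
   how much data is still queued for output so the display is centered on
   what is currently audible, then look for a stable trigger point so the
   waveform does not jitter from one refresh to the next. */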
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
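/* The returned value is the pts of the last decoded audio data minus the
   time represented by the data still waiting in the output buffer (16-bit
   samples are assumed, hence the factor 2 * channels in bytes_per_sec). */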
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

/* called to display each frame */
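/* The nominal delay is the pts difference between the current and the
   previous picture. When audio or an external clock is the master, the delay
   is dropped to 0 or doubled if the video drifts beyond the sync threshold;
   frame_timer accumulates the resulting target display times, and the next
   refresh is scheduled from the remaining wall-clock delay. */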
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 2.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors) */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

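/* The video decoder may buffer and reorder frames, so the pts of the packet
   currently being decoded is saved in global_video_pkt_pts when the codec
   allocates the frame buffer (my_get_buffer) and read back through
   frame->opaque once the frame comes out in display order. */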
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;

static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
    int ret= avcodec_default_get_buffer(c, pic);
    uint64_t *pts= av_malloc(sizeof(uint64_t));
    *pts= global_video_pkt_pts;
    pic->opaque= pts;
    return ret;
}

static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
    if(pic) av_freep(&pic->opaque);
    avcodec_default_release_buffer(c, pic);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if the video or external clock is the master) */
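/* The A-V difference is smoothed with an exponentially weighted sum:
   audio_diff_cum = diff + audio_diff_avg_coef * audio_diff_cum, so the
   average is audio_diff_cum * (1 - audio_diff_avg_coef) once
   AUDIO_DIFF_AVG_NB measurements have been accumulated. The correction is
   clamped to SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer so the
   speed change stays small. */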
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the synchro */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}

/* decode one audio frame and return its uncompressed size */
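/* audio_clock is advanced by the duration of the decoded data,
   data_size / (2 * channels * sample_rate) seconds (16-bit samples), so that
   get_audio_clock() can report the time of the sample currently being
   played. */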
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1571
{
1572
    AVPacket *pkt = &is->audio_pkt;
1573
    int n, len1, data_size;
1574
    double pts;
1575

    
1576
    for(;;) {
1577
        /* NOTE: the audio packet can contain several frames */
1578
        while (is->audio_pkt_size > 0) {
1579
            data_size = buf_size;
1580
            len1 = avcodec_decode_audio2(is->audio_st->codec,
1581
                                        (int16_t *)audio_buf, &data_size,
1582
                                        is->audio_pkt_data, is->audio_pkt_size);
1583
            if (len1 < 0) {
1584
                /* if error, we skip the frame */
1585
                is->audio_pkt_size = 0;
1586
                break;
1587
            }
1588

    
1589
            is->audio_pkt_data += len1;
1590
            is->audio_pkt_size -= len1;
1591
            if (data_size <= 0)
1592
                continue;
1593
            /* if no pts, then compute it */
1594
            pts = is->audio_clock;
1595
            *pts_ptr = pts;
1596
            n = 2 * is->audio_st->codec->channels;
1597
            is->audio_clock += (double)data_size /
1598
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* update the audio clock with the pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
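        /* Note: pkt->pts is expressed in stream time_base units and
           av_q2d(time_base) converts it to seconds.  E.g. with a 1/90000
           time base, pts = 900000 corresponds to 10.0 s. */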
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
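        /* Note: SDL invokes this callback from its own audio thread and
           expects exactly 'len' bytes to be written into 'stream'.  The
           code below copies whatever is left in audio_buf; any remainder
           is carried over to the next callback via audio_buf_index. */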
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: remove this */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
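        /* Note: SDL may hand back a spec that differs from the one
           requested; spec.size is the obtained hardware buffer size in
           bytes (samples * channels * bytes per sample). */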
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness measure,
           we correct audio sync only if the error is larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
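        /* Note: the averaging coefficient satisfies
           coef^AUDIO_DIFF_AVG_NB = 0.01, so the accumulated A-V difference
           behaves like an exponential average over roughly the last 20
           measurements.  The threshold equals two SDL audio buffers: with
           1024 samples at 44100 Hz that is about 46 ms. */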

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        enc->get_buffer     = my_get_buffer;
        enc->release_buffer = my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    if (s->track != 0)
        fprintf(stderr, "Track: %d\n", s->track);
    if (s->title[0] != '\0')
        fprintf(stderr, "Title: %s\n", s->title);
    if (s->author[0] != '\0')
        fprintf(stderr, "Author: %s\n", s->author);
    if (s->copyright[0] != '\0')
        fprintf(stderr, "Copyright: %s\n", s->copyright);
    if (s->comment[0] != '\0')
        fprintf(stderr, "Comment: %s\n", s->comment);
    if (s->album[0] != '\0')
        fprintf(stderr, "Album: %s\n", s->album);
    if (s->year != 0)
        fprintf(stderr, "Year: %d\n", s->year);
    if (s->genre[0] != '\0')
        fprintf(stderr, "Genre: %s\n", s->genre);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}
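/* Note: this callback is registered with url_set_interrupt_cb() below so
   that blocking I/O inside libavformat can return early once abort_request
   is set, letting the demuxing thread shut down promptly. */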

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;
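    /* Note: these AVFormatParameters are presumably only hints, consumed by
       demuxers such as raw video/audio that cannot derive the frame size,
       pixel format or frame rate from the input itself. */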

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking was requested, execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if      (is->video_stream >= 0)    stream_index = is->video_stream;
            else if (is->audio_stream >= 0)    stream_index = is->audio_stream;
            else if (is->subtitle_stream >= 0) stream_index = is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }
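            /* Note: av_rescale_q() converts the seek target from
               AV_TIME_BASE units (microseconds) into the chosen stream's
               time base, e.g. 10 s = 10*AV_TIME_BASE becomes 900000 in a
               1/90000 time base. */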

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            } else {
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
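        /* Note: this is simple back-pressure.  The reader sleeps whenever
           any packet queue exceeds its byte limit (the MAX_*Q_SIZE values
           defined near the top of the file); the decoder threads shrink
           the queues as they consume packets. */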
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);
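    /* Note: the first refresh is scheduled 40 ms out, i.e. roughly one
       frame period at 25 fps; later refreshes are expected to be
       rescheduled by the refresh timer handler itself. */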

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
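    /* Note: the loop below scans forward from the current stream, wrapping
       around, and picks the next stream of the same type with sane
       parameters.  For subtitles, wrapping past the last stream selects
       "no subtitle" (stream_index = -1). */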
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
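            /* Note: a click at fraction 'frac' of the window width maps to
               start_time + frac * duration, both in AV_TIME_BASE
               (microsecond) units; e.g. clicking the middle of a 2-hour
               file seeks to roughly the 1-hour mark. */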
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static void opt_width(const char *arg)
{
    screen_width = atoi(arg);
    if(screen_width<=0){
        fprintf(stderr, "invalid width\n");
        exit(1);
    }
}

static void opt_height(const char *arg)
{
    screen_height = atoi(arg);
    if(screen_height<=0){
        fprintf(stderr, "invalid height\n");
        exit(1);
    }
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

#ifdef CONFIG_RTSP_DEMUXER
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif

static void opt_sync(const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        show_help();
        exit(1);
    }
}

static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
    if (start_time == INT64_MIN) {
        fprintf(stderr, "Invalid duration specification: %s\n", arg);
        exit(1);
    }
}

static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}

static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}

static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}

static void opt_show_help(void)
{
    show_help();
    exit(0);
}

const OptionDef options[] = {
    { "h", 0, {(void*)opt_show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non-spec-compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_RTSP_DEMUXER
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};

void show_help(void)
{
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
           "usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demuxers and protocols */
    av_register_all();

    show_banner(program_name, program_birth_year);

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_help();
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";
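    /* Note: flush_pkt is a sentinel packet.  After a seek it is pushed into
       each packet queue, and the decoder threads compare pkt->data against
       flush_pkt.data (see audio_decode_frame above) to know when to call
       avcodec_flush_buffers(). */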

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}