
ffmpeg / ffplay.c @ b156b88c


1
/*
2
 * FFplay : Simple Media Player based on the ffmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include <math.h>
23
#include <limits.h>
24
#include "avformat.h"
25
#include "swscale.h"
26
#include "avstring.h"
27

    
28
#include "version.h"
29
#include "cmdutils.h"
30

    
31
#include <SDL.h>
32
#include <SDL_thread.h>
33

    
34
#ifdef __MINGW32__
35
#undef main /* We don't want SDL to override our main() */
36
#endif
37

    
38
#undef exit
39

    
40
//#define DEBUG_SYNC
41

    
42
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
43
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
44
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
45

    
46
/* SDL audio buffer size, in samples. Should be small to have precise
47
   A/V sync as SDL does not have hardware buffer fullness info. */
48
#define SDL_AUDIO_BUFFER_SIZE 1024
49

    
50
/* no AV sync correction is done if below the AV sync threshold */
51
#define AV_SYNC_THRESHOLD 0.01
52
/* no AV correction is done if the error is too large */
53
#define AV_NOSYNC_THRESHOLD 10.0
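/* These two thresholds drive the sync logic below: video_refresh_timer()
   zeroes or doubles the inter-frame delay when the picture drifts from the
   master clock by more than AV_SYNC_THRESHOLD, and both the video and audio
   paths stop correcting altogether once the drift exceeds
   AV_NOSYNC_THRESHOLD (e.g. right after a seek or with broken timestamps). */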
54

    
55
/* maximum audio speed change to get correct sync */
56
#define SAMPLE_CORRECTION_PERCENT_MAX 10
57

    
58
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
59
#define AUDIO_DIFF_AVG_NB   20
60

    
61
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
62
#define SAMPLE_ARRAY_SIZE (2*65536)
63

    
64
static int sws_flags = SWS_BICUBIC;
65

    
66
typedef struct PacketQueue {
67
    AVPacketList *first_pkt, *last_pkt;
68
    int nb_packets;
69
    int size;
70
    int abort_request;
71
    SDL_mutex *mutex;
72
    SDL_cond *cond;
73
} PacketQueue;
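/* A PacketQueue is a thread-safe FIFO of demuxed packets: the parse thread
   appends with packet_queue_put() while a decoder thread blocks on 'cond'
   in packet_queue_get(); setting 'abort_request' wakes everybody up so the
   queue can be torn down without deadlocking. 'size' tracks the queued
   payload in bytes, the quantity bounded by the MAX_*Q_SIZE limits above. */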
74

    
75
#define VIDEO_PICTURE_QUEUE_SIZE 1
76
#define SUBPICTURE_QUEUE_SIZE 4
77

    
78
typedef struct VideoPicture {
79
    double pts;                                  ///<presentation time stamp for this picture
80
    SDL_Overlay *bmp;
81
    int width, height; /* source height & width */
82
    int allocated;
83
} VideoPicture;
84

    
85
typedef struct SubPicture {
86
    double pts; /* presentation time stamp for this picture */
87
    AVSubtitle sub;
88
} SubPicture;
89

    
90
enum {
91
    AV_SYNC_AUDIO_MASTER, /* default choice */
92
    AV_SYNC_VIDEO_MASTER,
93
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
94
};
95

    
96
typedef struct VideoState {
97
    SDL_Thread *parse_tid;
98
    SDL_Thread *video_tid;
99
    AVInputFormat *iformat;
100
    int no_background;
101
    int abort_request;
102
    int paused;
103
    int last_paused;
104
    int seek_req;
105
    int seek_flags;
106
    int64_t seek_pos;
107
    AVFormatContext *ic;
108
    int dtg_active_format;
109

    
110
    int audio_stream;
111

    
112
    int av_sync_type;
113
    double external_clock; /* external clock base */
114
    int64_t external_clock_time;
115

    
116
    double audio_clock;
117
    double audio_diff_cum; /* used for AV difference average computation */
118
    double audio_diff_avg_coef;
119
    double audio_diff_threshold;
120
    int audio_diff_avg_count;
121
    AVStream *audio_st;
122
    PacketQueue audioq;
123
    int audio_hw_buf_size;
124
    /* samples output by the codec. we reserve more space for avsync
125
       compensation */
126
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
127
    unsigned int audio_buf_size; /* in bytes */
128
    int audio_buf_index; /* in bytes */
129
    AVPacket audio_pkt;
130
    uint8_t *audio_pkt_data;
131
    int audio_pkt_size;
132

    
133
    int show_audio; /* if true, display audio samples */
134
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
135
    int sample_array_index;
136
    int last_i_start;
137

    
138
    SDL_Thread *subtitle_tid;
139
    int subtitle_stream;
140
    int subtitle_stream_changed;
141
    AVStream *subtitle_st;
142
    PacketQueue subtitleq;
143
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
144
    int subpq_size, subpq_rindex, subpq_windex;
145
    SDL_mutex *subpq_mutex;
146
    SDL_cond *subpq_cond;
147

    
148
    double frame_timer;
149
    double frame_last_pts;
150
    double frame_last_delay;
151
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
152
    int video_stream;
153
    AVStream *video_st;
154
    PacketQueue videoq;
155
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
156
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
157
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
158
    int pictq_size, pictq_rindex, pictq_windex;
159
    SDL_mutex *pictq_mutex;
160
    SDL_cond *pictq_cond;
161

    
162
    //    QETimer *video_timer;
163
    char filename[1024];
164
    int width, height, xleft, ytop;
165
} VideoState;
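/* A single VideoState describes the currently opened file: the demux thread
   (parse_tid) fills the audio/video/subtitle packet queues, video_tid and
   subtitle_tid decode them into the small pictq/subpq ring buffers above,
   and the SDL audio callback drains audioq directly from the audio device
   thread. */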
166

    
167
void show_help(void);
168
static int audio_write_get_buf_size(VideoState *is);
169

    
170
/* options specified by the user */
171
static AVInputFormat *file_iformat;
172
static const char *input_filename;
173
static int fs_screen_width;
174
static int fs_screen_height;
175
static int screen_width = 0;
176
static int screen_height = 0;
177
static int frame_width = 0;
178
static int frame_height = 0;
179
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
180
static int audio_disable;
181
static int video_disable;
182
static int wanted_audio_stream= 0;
183
static int wanted_video_stream= 0;
184
static int seek_by_bytes;
185
static int display_disable;
186
static int show_status;
187
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
188
static int64_t start_time = AV_NOPTS_VALUE;
189
static int debug = 0;
190
static int debug_mv = 0;
191
static int step = 0;
192
static int thread_count = 1;
193
static int workaround_bugs = 1;
194
static int fast = 0;
195
static int genpts = 0;
196
static int lowres = 0;
197
static int idct = FF_IDCT_AUTO;
198
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
199
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
200
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
201
static int error_resilience = FF_ER_CAREFUL;
202
static int error_concealment = 3;
203
static int decoder_reorder_pts= 0;
204

    
205
/* current context */
206
static int is_full_screen;
207
static VideoState *cur_stream;
208
static int64_t audio_callback_time;
209

    
210
AVPacket flush_pkt;
211

    
212
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
213
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
214
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
215

    
216
SDL_Surface *screen;
217

    
218
/* packet queue handling */
219
static void packet_queue_init(PacketQueue *q)
220
{
221
    memset(q, 0, sizeof(PacketQueue));
222
    q->mutex = SDL_CreateMutex();
223
    q->cond = SDL_CreateCond();
224
}
225

    
226
static void packet_queue_flush(PacketQueue *q)
227
{
228
    AVPacketList *pkt, *pkt1;
229

    
230
    SDL_LockMutex(q->mutex);
231
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
232
        pkt1 = pkt->next;
233
        av_free_packet(&pkt->pkt);
234
        av_freep(&pkt);
235
    }
236
    q->last_pkt = NULL;
237
    q->first_pkt = NULL;
238
    q->nb_packets = 0;
239
    q->size = 0;
240
    SDL_UnlockMutex(q->mutex);
241
}
242

    
243
static void packet_queue_end(PacketQueue *q)
244
{
245
    packet_queue_flush(q);
246
    SDL_DestroyMutex(q->mutex);
247
    SDL_DestroyCond(q->cond);
248
}
249

    
250
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
251
{
252
    AVPacketList *pkt1;
253

    
254
    /* duplicate the packet */
255
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
256
        return -1;
257

    
258
    pkt1 = av_malloc(sizeof(AVPacketList));
259
    if (!pkt1)
260
        return -1;
261
    pkt1->pkt = *pkt;
262
    pkt1->next = NULL;
263

    
264

    
265
    SDL_LockMutex(q->mutex);
266

    
267
    if (!q->last_pkt)
268

    
269
        q->first_pkt = pkt1;
270
    else
271
        q->last_pkt->next = pkt1;
272
    q->last_pkt = pkt1;
273
    q->nb_packets++;
274
    q->size += pkt1->pkt.size;
275
    /* XXX: should duplicate packet data in DV case */
276
    SDL_CondSignal(q->cond);
277

    
278
    SDL_UnlockMutex(q->mutex);
279
    return 0;
280
}
281

    
282
static void packet_queue_abort(PacketQueue *q)
283
{
284
    SDL_LockMutex(q->mutex);
285

    
286
    q->abort_request = 1;
287

    
288
    SDL_CondSignal(q->cond);
289

    
290
    SDL_UnlockMutex(q->mutex);
291
}
292

    
293
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
294
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
295
{
296
    AVPacketList *pkt1;
297
    int ret;
298

    
299
    SDL_LockMutex(q->mutex);
300

    
301
    for(;;) {
302
        if (q->abort_request) {
303
            ret = -1;
304
            break;
305
        }
306

    
307
        pkt1 = q->first_pkt;
308
        if (pkt1) {
309
            q->first_pkt = pkt1->next;
310
            if (!q->first_pkt)
311
                q->last_pkt = NULL;
312
            q->nb_packets--;
313
            q->size -= pkt1->pkt.size;
314
            *pkt = pkt1->pkt;
315
            av_free(pkt1);
316
            ret = 1;
317
            break;
318
        } else if (!block) {
319
            ret = 0;
320
            break;
321
        } else {
322
            SDL_CondWait(q->cond, q->mutex);
323
        }
324
    }
325
    SDL_UnlockMutex(q->mutex);
326
    return ret;
327
}
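/* With block != 0, packet_queue_get() sleeps on the condition variable until
   a packet is queued or packet_queue_abort() is called, so decoder threads
   can simply loop on it without polling. */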
328

    
329
static inline void fill_rectangle(SDL_Surface *screen,
330
                                  int x, int y, int w, int h, int color)
331
{
332
    SDL_Rect rect;
333
    rect.x = x;
334
    rect.y = y;
335
    rect.w = w;
336
    rect.h = h;
337
    SDL_FillRect(screen, &rect, color);
338
}
339

    
340
#if 0
341
/* draw only the border of a rectangle */
342
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
343
{
344
    int w1, w2, h1, h2;
345

346
    /* fill the background */
347
    w1 = x;
348
    if (w1 < 0)
349
        w1 = 0;
350
    w2 = s->width - (x + w);
351
    if (w2 < 0)
352
        w2 = 0;
353
    h1 = y;
354
    if (h1 < 0)
355
        h1 = 0;
356
    h2 = s->height - (y + h);
357
    if (h2 < 0)
358
        h2 = 0;
359
    fill_rectangle(screen,
360
                   s->xleft, s->ytop,
361
                   w1, s->height,
362
                   color);
363
    fill_rectangle(screen,
364
                   s->xleft + s->width - w2, s->ytop,
365
                   w2, s->height,
366
                   color);
367
    fill_rectangle(screen,
368
                   s->xleft + w1, s->ytop,
369
                   s->width - w1 - w2, h1,
370
                   color);
371
    fill_rectangle(screen,
372
                   s->xleft + w1, s->ytop + s->height - h2,
373
                   s->width - w1 - w2, h2,
374
                   color);
375
}
376
#endif
377

    
378

    
379

    
380
#define SCALEBITS 10
381
#define ONE_HALF  (1 << (SCALEBITS - 1))
382
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
383

    
384
#define RGB_TO_Y_CCIR(r, g, b) \
385
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
386
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
387

    
388
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
389
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
390
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
391

    
392
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
393
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
394
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
395

    
396
#define ALPHA_BLEND(a, oldp, newp, s)\
397
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
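/* ALPHA_BLEND mixes an existing component with a new one:
   (oldp * (255 - a) + newp * a) / 255, where 's' accounts for chroma values
   that were accumulated over 2 or 4 pixels (and are therefore already scaled
   by 1 << s). With a = 0 the old value survives, with a = 255 the new one
   wins. */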
398

    
399
#define RGBA_IN(r, g, b, a, s)\
400
{\
401
    unsigned int v = ((const uint32_t *)(s))[0];\
402
    a = (v >> 24) & 0xff;\
403
    r = (v >> 16) & 0xff;\
404
    g = (v >> 8) & 0xff;\
405
    b = v & 0xff;\
406
}
407

    
408
#define YUVA_IN(y, u, v, a, s, pal)\
409
{\
410
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
411
    a = (val >> 24) & 0xff;\
412
    y = (val >> 16) & 0xff;\
413
    u = (val >> 8) & 0xff;\
414
    v = val & 0xff;\
415
}
416

    
417
#define YUVA_OUT(d, y, u, v, a)\
418
{\
419
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
420
}
421

    
422

    
423
#define BPP 1
424

    
425
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
426
{
427
    int wrap, wrap3, width2, skip2;
428
    int y, u, v, a, u1, v1, a1, w, h;
429
    uint8_t *lum, *cb, *cr;
430
    const uint8_t *p;
431
    const uint32_t *pal;
432
    int dstx, dsty, dstw, dsth;
433

    
434
    dstx = FFMIN(FFMAX(rect->x, 0), imgw);
435
    dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
436
    dsty = FFMIN(FFMAX(rect->y, 0), imgh);
437
    dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
438
    lum = dst->data[0] + dsty * dst->linesize[0];
439
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
440
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
441

    
442
    width2 = (dstw + 1) >> 1;
443
    skip2 = dstx >> 1;
444
    wrap = dst->linesize[0];
445
    wrap3 = rect->linesize;
446
    p = rect->bitmap;
447
    pal = rect->rgba_palette;  /* Now in YCrCb! */
448

    
449
    if (dsty & 1) {
450
        lum += dstx;
451
        cb += skip2;
452
        cr += skip2;
453

    
454
        if (dstx & 1) {
455
            YUVA_IN(y, u, v, a, p, pal);
456
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
457
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
458
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
459
            cb++;
460
            cr++;
461
            lum++;
462
            p += BPP;
463
        }
464
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
465
            YUVA_IN(y, u, v, a, p, pal);
466
            u1 = u;
467
            v1 = v;
468
            a1 = a;
469
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
470

    
471
            YUVA_IN(y, u, v, a, p + BPP, pal);
472
            u1 += u;
473
            v1 += v;
474
            a1 += a;
475
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
476
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
477
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
478
            cb++;
479
            cr++;
480
            p += 2 * BPP;
481
            lum += 2;
482
        }
483
        if (w) {
484
            YUVA_IN(y, u, v, a, p, pal);
485
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
486
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
487
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
488
        }
489
        p += wrap3 + (wrap3 - dstw * BPP);
490
        lum += wrap + (wrap - dstw - dstx);
491
        cb += dst->linesize[1] - width2 - skip2;
492
        cr += dst->linesize[2] - width2 - skip2;
493
    }
494
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
495
        lum += dstx;
496
        cb += skip2;
497
        cr += skip2;
498

    
499
        if (dstx & 1) {
500
            YUVA_IN(y, u, v, a, p, pal);
501
            u1 = u;
502
            v1 = v;
503
            a1 = a;
504
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
505
            p += wrap3;
506
            lum += wrap;
507
            YUVA_IN(y, u, v, a, p, pal);
508
            u1 += u;
509
            v1 += v;
510
            a1 += a;
511
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
513
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
514
            cb++;
515
            cr++;
516
            p += -wrap3 + BPP;
517
            lum += -wrap + 1;
518
        }
519
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
520
            YUVA_IN(y, u, v, a, p, pal);
521
            u1 = u;
522
            v1 = v;
523
            a1 = a;
524
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
525

    
526
            YUVA_IN(y, u, v, a, p, pal);
527
            u1 += u;
528
            v1 += v;
529
            a1 += a;
530
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
531
            p += wrap3;
532
            lum += wrap;
533

    
534
            YUVA_IN(y, u, v, a, p, pal);
535
            u1 += u;
536
            v1 += v;
537
            a1 += a;
538
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
539

    
540
            YUVA_IN(y, u, v, a, p, pal);
541
            u1 += u;
542
            v1 += v;
543
            a1 += a;
544
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
545

    
546
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
547
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
548

    
549
            cb++;
550
            cr++;
551
            p += -wrap3 + 2 * BPP;
552
            lum += -wrap + 2;
553
        }
554
        if (w) {
555
            YUVA_IN(y, u, v, a, p, pal);
556
            u1 = u;
557
            v1 = v;
558
            a1 = a;
559
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
560
            p += wrap3;
561
            lum += wrap;
562
            YUVA_IN(y, u, v, a, p, pal);
563
            u1 += u;
564
            v1 += v;
565
            a1 += a;
566
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
568
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
569
            cb++;
570
            cr++;
571
            p += -wrap3 + BPP;
572
            lum += -wrap + 1;
573
        }
574
        p += wrap3 + (wrap3 - dstw * BPP);
575
        lum += wrap + (wrap - dstw - dstx);
576
        cb += dst->linesize[1] - width2 - skip2;
577
        cr += dst->linesize[2] - width2 - skip2;
578
    }
579
    /* handle odd height */
580
    if (h) {
581
        lum += dstx;
582
        cb += skip2;
583
        cr += skip2;
584

    
585
        if (dstx & 1) {
586
            YUVA_IN(y, u, v, a, p, pal);
587
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
588
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
589
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
590
            cb++;
591
            cr++;
592
            lum++;
593
            p += BPP;
594
        }
595
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
596
            YUVA_IN(y, u, v, a, p, pal);
597
            u1 = u;
598
            v1 = v;
599
            a1 = a;
600
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
601

    
602
            YUVA_IN(y, u, v, a, p + BPP, pal);
603
            u1 += u;
604
            v1 += v;
605
            a1 += a;
606
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
607
            /* blend the chroma accumulated over both pixels, as in the loops above */
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
608
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
609
            cb++;
610
            cr++;
611
            p += 2 * BPP;
612
            lum += 2;
613
        }
614
        if (w) {
615
            YUVA_IN(y, u, v, a, p, pal);
616
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
617
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
618
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
619
        }
620
    }
621
}
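/* blend_subrect() blends a palettized subtitle rectangle into a 4:2:0
   picture, which is why it walks the destination in 2x2 blocks: luma is
   blended per pixel, while u1/v1/a1 accumulate the chroma and alpha of the
   block so that each shared cb/cr sample is written only once. The odd
   first row/column and odd width/height cases are peeled off separately. */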
622

    
623
static void free_subpicture(SubPicture *sp)
624
{
625
    int i;
626

    
627
    for (i = 0; i < sp->sub.num_rects; i++)
628
    {
629
        av_free(sp->sub.rects[i].bitmap);
630
        av_free(sp->sub.rects[i].rgba_palette);
631
    }
632

    
633
    av_free(sp->sub.rects);
634

    
635
    memset(&sp->sub, 0, sizeof(AVSubtitle));
636
}
637

    
638
static void video_image_display(VideoState *is)
639
{
640
    VideoPicture *vp;
641
    SubPicture *sp;
642
    AVPicture pict;
643
    float aspect_ratio;
644
    int width, height, x, y;
645
    SDL_Rect rect;
646
    int i;
647

    
648
    vp = &is->pictq[is->pictq_rindex];
649
    if (vp->bmp) {
650
        /* XXX: use variable in the frame */
651
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
652
            aspect_ratio = 0;
653
        else
654
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
655
                * is->video_st->codec->width / is->video_st->codec->height;
656
        if (aspect_ratio <= 0.0)
657
            aspect_ratio = (float)is->video_st->codec->width /
658
                (float)is->video_st->codec->height;
659
        /* if an active format is indicated, then it overrides the
660
           mpeg format */
661
#if 0
662
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
663
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
664
            printf("dtg_active_format=%d\n", is->dtg_active_format);
665
        }
666
#endif
667
#if 0
668
        switch(is->video_st->codec->dtg_active_format) {
669
        case FF_DTG_AFD_SAME:
670
        default:
671
            /* nothing to do */
672
            break;
673
        case FF_DTG_AFD_4_3:
674
            aspect_ratio = 4.0 / 3.0;
675
            break;
676
        case FF_DTG_AFD_16_9:
677
            aspect_ratio = 16.0 / 9.0;
678
            break;
679
        case FF_DTG_AFD_14_9:
680
            aspect_ratio = 14.0 / 9.0;
681
            break;
682
        case FF_DTG_AFD_4_3_SP_14_9:
683
            aspect_ratio = 14.0 / 9.0;
684
            break;
685
        case FF_DTG_AFD_16_9_SP_14_9:
686
            aspect_ratio = 14.0 / 9.0;
687
            break;
688
        case FF_DTG_AFD_SP_4_3:
689
            aspect_ratio = 4.0 / 3.0;
690
            break;
691
        }
692
#endif
693

    
694
        if (is->subtitle_st)
695
        {
696
            if (is->subpq_size > 0)
697
            {
698
                sp = &is->subpq[is->subpq_rindex];
699

    
700
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
701
                {
702
                    SDL_LockYUVOverlay (vp->bmp);
703

    
704
                    pict.data[0] = vp->bmp->pixels[0];
705
                    pict.data[1] = vp->bmp->pixels[2];
706
                    pict.data[2] = vp->bmp->pixels[1];
707

    
708
                    pict.linesize[0] = vp->bmp->pitches[0];
709
                    pict.linesize[1] = vp->bmp->pitches[2];
710
                    pict.linesize[2] = vp->bmp->pitches[1];
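                    /* note the swapped plane indices: SDL's YV12 overlay
                       stores its planes in Y, V, U order while AVPicture
                       expects Y, U, V, so planes 1 and 2 (and their
                       pitches) trade places */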
711

    
712
                    for (i = 0; i < sp->sub.num_rects; i++)
713
                        blend_subrect(&pict, &sp->sub.rects[i],
714
                                      vp->bmp->w, vp->bmp->h);
715

    
716
                    SDL_UnlockYUVOverlay (vp->bmp);
717
                }
718
            }
719
        }
720

    
721

    
722
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
723
        height = is->height;
724
        width = ((int)rint(height * aspect_ratio)) & -3;
725
        if (width > is->width) {
726
            width = is->width;
727
            height = ((int)rint(width / aspect_ratio)) & -3;
728
        }
729
        x = (is->width - width) / 2;
730
        y = (is->height - height) / 2;
731
        if (!is->no_background) {
732
            /* fill the background */
733
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
734
        } else {
735
            is->no_background = 0;
736
        }
737
        rect.x = is->xleft + x;
738
        rect.y = is->ytop  + y;
739
        rect.w = width;
740
        rect.h = height;
741
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
742
    } else {
743
#if 0
744
        fill_rectangle(screen,
745
                       is->xleft, is->ytop, is->width, is->height,
746
                       QERGB(0x00, 0x00, 0x00));
747
#endif
748
    }
749
}
750

    
751
static inline int compute_mod(int a, int b)
752
{
753
    a = a % b;
754
    if (a >= 0)
755
        return a;
756
    else
757
        return a + b;
758
}
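/* compute_mod() is the mathematical modulus (result always in [0, b)); it is
   used below to wrap possibly negative offsets into the sample_array ring
   buffer, e.g. compute_mod(-3, 8) == 5. */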
759

    
760
static void video_audio_display(VideoState *s)
761
{
762
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
763
    int ch, channels, h, h2, bgcolor, fgcolor;
764
    int16_t time_diff;
765

    
766
    /* compute display index : center on currently output samples */
767
    channels = s->audio_st->codec->channels;
768
    nb_display_channels = channels;
769
    if (!s->paused) {
770
        n = 2 * channels;
771
        delay = audio_write_get_buf_size(s);
772
        delay /= n;
773

    
774
        /* to be more precise, we take into account the time spent since
775
           the last buffer computation */
776
        if (audio_callback_time) {
777
            time_diff = av_gettime() - audio_callback_time;
778
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
779
        }
780

    
781
        delay -= s->width / 2;
782
        if (delay < s->width)
783
            delay = s->width;
784

    
785
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
786

    
787
        h= INT_MIN;
788
        for(i=0; i<1000; i+=channels){
789
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
790
            int a= s->sample_array[idx];
791
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
792
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
793
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
794
            int score= a-d;
795
            if(h<score && (b^c)<0){
796
                h= score;
797
                i_start= idx;
798
            }
799
        }
800

    
801
        s->last_i_start = i_start;
802
    } else {
803
        i_start = s->last_i_start;
804
    }
805

    
806
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
807
    fill_rectangle(screen,
808
                   s->xleft, s->ytop, s->width, s->height,
809
                   bgcolor);
810

    
811
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
812

    
813
    /* total height for one channel */
814
    h = s->height / nb_display_channels;
815
    /* graph height / 2 */
816
    h2 = (h * 9) / 20;
817
    for(ch = 0;ch < nb_display_channels; ch++) {
818
        i = i_start + ch;
819
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
820
        for(x = 0; x < s->width; x++) {
821
            y = (s->sample_array[i] * h2) >> 15;
822
            if (y < 0) {
823
                y = -y;
824
                ys = y1 - y;
825
            } else {
826
                ys = y1;
827
            }
828
            fill_rectangle(screen,
829
                           s->xleft + x, ys, 1, y,
830
                           fgcolor);
831
            i += channels;
832
            if (i >= SAMPLE_ARRAY_SIZE)
833
                i -= SAMPLE_ARRAY_SIZE;
834
        }
835
    }
836

    
837
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
838

    
839
    for(ch = 1;ch < nb_display_channels; ch++) {
840
        y = s->ytop + ch * h;
841
        fill_rectangle(screen,
842
                       s->xleft, y, s->width, 1,
843
                       fgcolor);
844
    }
845
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
846
}
847

    
848
static int video_open(VideoState *is){
849
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
850
    int w,h;
851

    
852
    if(is_full_screen) flags |= SDL_FULLSCREEN;
853
    else               flags |= SDL_RESIZABLE;
854

    
855
    if (is_full_screen && fs_screen_width) {
856
        w = fs_screen_width;
857
        h = fs_screen_height;
858
    } else if(!is_full_screen && screen_width){
859
        w = screen_width;
860
        h = screen_height;
861
    }else if (is->video_st && is->video_st->codec->width){
862
        w = is->video_st->codec->width;
863
        h = is->video_st->codec->height;
864
    } else {
865
        w = 640;
866
        h = 480;
867
    }
868
#ifndef __APPLE__
869
    screen = SDL_SetVideoMode(w, h, 0, flags);
870
#else
871
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
872
    screen = SDL_SetVideoMode(w, h, 24, flags);
873
#endif
874
    if (!screen) {
875
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
876
        return -1;
877
    }
878
    SDL_WM_SetCaption("FFplay", "FFplay");
879

    
880
    is->width = screen->w;
881
    is->height = screen->h;
882

    
883
    return 0;
884
}
885

    
886
/* display the current picture, if any */
887
static void video_display(VideoState *is)
888
{
889
    if(!screen)
890
        video_open(cur_stream);
891
    if (is->audio_st && is->show_audio)
892
        video_audio_display(is);
893
    else if (is->video_st)
894
        video_image_display(is);
895
}
896

    
897
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
898
{
899
    SDL_Event event;
900
    event.type = FF_REFRESH_EVENT;
901
    event.user.data1 = opaque;
902
    SDL_PushEvent(&event);
903
    return 0; /* 0 means stop timer */
904
}
905

    
906
/* schedule a video refresh in 'delay' ms */
907
static void schedule_refresh(VideoState *is, int delay)
908
{
909
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
910
}
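/* schedule_refresh() arms a one-shot SDL timer whose callback only pushes an
   FF_REFRESH_EVENT; the actual drawing is done later, when the main event
   loop receives that event and calls video_refresh_timer() from the thread
   that owns the SDL surface. */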
911

    
912
/* get the current audio clock value */
913
static double get_audio_clock(VideoState *is)
914
{
915
    double pts;
916
    int hw_buf_size, bytes_per_sec;
917
    pts = is->audio_clock;
918
    hw_buf_size = audio_write_get_buf_size(is);
919
    bytes_per_sec = 0;
920
    if (is->audio_st) {
921
        bytes_per_sec = is->audio_st->codec->sample_rate *
922
            2 * is->audio_st->codec->channels;
923
    }
924
    if (bytes_per_sec)
925
        pts -= (double)hw_buf_size / bytes_per_sec;
926
    return pts;
927
}
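/* The audio clock is the pts of the most recently decoded audio data minus
   the data still waiting in the output buffer: with 16-bit samples the byte
   rate is sample_rate * 2 * channels, so e.g. 8192 buffered bytes of
   44100 Hz stereo pull the clock back by 8192 / 176400 ~ 46 ms. */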
928

    
929
/* get the current video clock value */
930
static double get_video_clock(VideoState *is)
931
{
932
    double delta;
933
    if (is->paused) {
934
        delta = 0;
935
    } else {
936
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
937
    }
938
    return is->video_current_pts + delta;
939
}
940

    
941
/* get the current external clock value */
942
static double get_external_clock(VideoState *is)
943
{
944
    int64_t ti;
945
    ti = av_gettime();
946
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
947
}
948

    
949
/* get the current master clock value */
950
static double get_master_clock(VideoState *is)
951
{
952
    double val;
953

    
954
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
955
        if (is->video_st)
956
            val = get_video_clock(is);
957
        else
958
            val = get_audio_clock(is);
959
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
960
        if (is->audio_st)
961
            val = get_audio_clock(is);
962
        else
963
            val = get_video_clock(is);
964
    } else {
965
        val = get_external_clock(is);
966
    }
967
    return val;
968
}
969

    
970
/* seek in the stream */
971
static void stream_seek(VideoState *is, int64_t pos, int rel)
972
{
973
    if (!is->seek_req) {
974
        is->seek_pos = pos;
975
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
976
        if (seek_by_bytes)
977
            is->seek_flags |= AVSEEK_FLAG_BYTE;
978
        is->seek_req = 1;
979
    }
980
}
981

    
982
/* pause or resume the video */
983
static void stream_pause(VideoState *is)
984
{
985
    is->paused = !is->paused;
986
    if (!is->paused) {
987
        is->video_current_pts = get_video_clock(is);
988
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
989
    }
990
}
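/* On resume, frame_timer is advanced by the wall-clock time elapsed since
   video_current_pts_time so that video_refresh_timer() does not try to
   "catch up" on the time spent paused. */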
991

    
992
/* called to display each frame */
993
static void video_refresh_timer(void *opaque)
994
{
995
    VideoState *is = opaque;
996
    VideoPicture *vp;
997
    double actual_delay, delay, sync_threshold, ref_clock, diff;
998

    
999
    SubPicture *sp, *sp2;
1000

    
1001
    if (is->video_st) {
1002
        if (is->pictq_size == 0) {
1003
            /* if no picture, need to wait */
1004
            schedule_refresh(is, 1);
1005
        } else {
1006
            /* dequeue the picture */
1007
            vp = &is->pictq[is->pictq_rindex];
1008

    
1009
            /* update current video pts */
1010
            is->video_current_pts = vp->pts;
1011
            is->video_current_pts_time = av_gettime();
1012

    
1013
            /* compute nominal delay */
1014
            delay = vp->pts - is->frame_last_pts;
1015
            if (delay <= 0 || delay >= 2.0) {
1016
                /* if incorrect delay, use previous one */
1017
                delay = is->frame_last_delay;
1018
            }
1019
            is->frame_last_delay = delay;
1020
            is->frame_last_pts = vp->pts;
1021

    
1022
            /* update delay to follow master synchronisation source */
1023
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1024
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1025
                /* if video is slave, we try to correct big delays by
1026
                   duplicating or deleting a frame */
1027
                ref_clock = get_master_clock(is);
1028
                diff = vp->pts - ref_clock;
1029

    
1030
                /* skip or repeat frame. We take into account the
1031
                   delay to compute the threshold. I still don't know
1032
                   if it is the best guess */
1033
                sync_threshold = AV_SYNC_THRESHOLD;
1034
                if (delay > sync_threshold)
1035
                    sync_threshold = delay;
1036
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1037
                    if (diff <= -sync_threshold)
1038
                        delay = 0;
1039
                    else if (diff >= sync_threshold)
1040
                        delay = 2 * delay;
1041
                }
1042
            }
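            /* net effect: if video lags the master clock by more than
               sync_threshold the next frame is scheduled immediately
               (delay 0); if it runs ahead, the delay is doubled; drifts
               beyond AV_NOSYNC_THRESHOLD are ignored because the
               timestamps are probably not trustworthy */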
1043

    
1044
            is->frame_timer += delay;
1045
            /* compute the REAL delay (we need to do that to avoid
1046
               long term errors) */
1047
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1048
            if (actual_delay < 0.010) {
1049
                /* XXX: should skip picture */
1050
                actual_delay = 0.010;
1051
            }
1052
            /* launch timer for next picture */
1053
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1054

    
1055
#if defined(DEBUG_SYNC)
1056
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1057
                   delay, actual_delay, vp->pts, -diff);
1058
#endif
1059

    
1060
            if(is->subtitle_st) {
1061
                if (is->subtitle_stream_changed) {
1062
                    SDL_LockMutex(is->subpq_mutex);
1063

    
1064
                    while (is->subpq_size) {
1065
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1066

    
1067
                        /* update queue size and signal for next picture */
1068
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1069
                            is->subpq_rindex = 0;
1070

    
1071
                        is->subpq_size--;
1072
                    }
1073
                    is->subtitle_stream_changed = 0;
1074

    
1075
                    SDL_CondSignal(is->subpq_cond);
1076
                    SDL_UnlockMutex(is->subpq_mutex);
1077
                } else {
1078
                    if (is->subpq_size > 0) {
1079
                        sp = &is->subpq[is->subpq_rindex];
1080

    
1081
                        if (is->subpq_size > 1)
1082
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1083
                        else
1084
                            sp2 = NULL;
1085

    
1086
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1087
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1088
                        {
1089
                            free_subpicture(sp);
1090

    
1091
                            /* update queue size and signal for next picture */
1092
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1093
                                is->subpq_rindex = 0;
1094

    
1095
                            SDL_LockMutex(is->subpq_mutex);
1096
                            is->subpq_size--;
1097
                            SDL_CondSignal(is->subpq_cond);
1098
                            SDL_UnlockMutex(is->subpq_mutex);
1099
                        }
1100
                    }
1101
                }
1102
            }
1103

    
1104
            /* display picture */
1105
            video_display(is);
1106

    
1107
            /* update queue size and signal for next picture */
1108
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1109
                is->pictq_rindex = 0;
1110

    
1111
            SDL_LockMutex(is->pictq_mutex);
1112
            is->pictq_size--;
1113
            SDL_CondSignal(is->pictq_cond);
1114
            SDL_UnlockMutex(is->pictq_mutex);
1115
        }
1116
    } else if (is->audio_st) {
1117
        /* draw the next audio frame */
1118

    
1119
        schedule_refresh(is, 40);
1120

    
1121
        /* if only audio stream, then display the audio bars (better
1122
           than nothing, just to test the implementation */
1123

    
1124
        /* display picture */
1125
        video_display(is);
1126
    } else {
1127
        schedule_refresh(is, 100);
1128
    }
1129
    if (show_status) {
1130
        static int64_t last_time;
1131
        int64_t cur_time;
1132
        int aqsize, vqsize, sqsize;
1133
        double av_diff;
1134

    
1135
        cur_time = av_gettime();
1136
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1137
            aqsize = 0;
1138
            vqsize = 0;
1139
            sqsize = 0;
1140
            if (is->audio_st)
1141
                aqsize = is->audioq.size;
1142
            if (is->video_st)
1143
                vqsize = is->videoq.size;
1144
            if (is->subtitle_st)
1145
                sqsize = is->subtitleq.size;
1146
            av_diff = 0;
1147
            if (is->audio_st && is->video_st)
1148
                av_diff = get_audio_clock(is) - get_video_clock(is);
1149
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1150
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1151
            fflush(stdout);
1152
            last_time = cur_time;
1153
        }
1154
    }
1155
}
1156

    
1157
/* allocate a picture (needs to do that in main thread to avoid
1158
   potential locking problems) */
1159
static void alloc_picture(void *opaque)
1160
{
1161
    VideoState *is = opaque;
1162
    VideoPicture *vp;
1163

    
1164
    vp = &is->pictq[is->pictq_windex];
1165

    
1166
    if (vp->bmp)
1167
        SDL_FreeYUVOverlay(vp->bmp);
1168

    
1169
#if 0
1170
    /* XXX: use generic function */
1171
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
1172
    switch(is->video_st->codec->pix_fmt) {
1173
    case PIX_FMT_YUV420P:
1174
    case PIX_FMT_YUV422P:
1175
    case PIX_FMT_YUV444P:
1176
    case PIX_FMT_YUYV422:
1177
    case PIX_FMT_YUV410P:
1178
    case PIX_FMT_YUV411P:
1179
        is_yuv = 1;
1180
        break;
1181
    default:
1182
        is_yuv = 0;
1183
        break;
1184
    }
1185
#endif
1186
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1187
                                   is->video_st->codec->height,
1188
                                   SDL_YV12_OVERLAY,
1189
                                   screen);
1190
    vp->width = is->video_st->codec->width;
1191
    vp->height = is->video_st->codec->height;
1192

    
1193
    SDL_LockMutex(is->pictq_mutex);
1194
    vp->allocated = 1;
1195
    SDL_CondSignal(is->pictq_cond);
1196
    SDL_UnlockMutex(is->pictq_mutex);
1197
}
1198

    
1199
/**
1200
 *
1201
 * @param pts the dts of the packet / pts of the frame, guessed if not known
1202
 */
1203
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1204
{
1205
    VideoPicture *vp;
1206
    int dst_pix_fmt;
1207
    AVPicture pict;
1208
    static struct SwsContext *img_convert_ctx;
1209

    
1210
    /* wait until we have space to put a new picture */
1211
    SDL_LockMutex(is->pictq_mutex);
1212
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1213
           !is->videoq.abort_request) {
1214
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1215
    }
1216
    SDL_UnlockMutex(is->pictq_mutex);
1217

    
1218
    if (is->videoq.abort_request)
1219
        return -1;
1220

    
1221
    vp = &is->pictq[is->pictq_windex];
1222

    
1223
    /* alloc or resize hardware picture buffer */
1224
    if (!vp->bmp ||
1225
        vp->width != is->video_st->codec->width ||
1226
        vp->height != is->video_st->codec->height) {
1227
        SDL_Event event;
1228

    
1229
        vp->allocated = 0;
1230

    
1231
        /* the allocation must be done in the main thread to avoid
1232
           locking problems */
1233
        event.type = FF_ALLOC_EVENT;
1234
        event.user.data1 = is;
1235
        SDL_PushEvent(&event);
1236

    
1237
        /* wait until the picture is allocated */
1238
        SDL_LockMutex(is->pictq_mutex);
1239
        while (!vp->allocated && !is->videoq.abort_request) {
1240
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1241
        }
1242
        SDL_UnlockMutex(is->pictq_mutex);
1243

    
1244
        if (is->videoq.abort_request)
1245
            return -1;
1246
    }
1247

    
1248
    /* if the frame is not skipped, then display it */
1249
    if (vp->bmp) {
1250
        /* get a pointer on the bitmap */
1251
        SDL_LockYUVOverlay (vp->bmp);
1252

    
1253
        dst_pix_fmt = PIX_FMT_YUV420P;
1254
        pict.data[0] = vp->bmp->pixels[0];
1255
        pict.data[1] = vp->bmp->pixels[2];
1256
        pict.data[2] = vp->bmp->pixels[1];
1257

    
1258
        pict.linesize[0] = vp->bmp->pitches[0];
1259
        pict.linesize[1] = vp->bmp->pitches[2];
1260
        pict.linesize[2] = vp->bmp->pitches[1];
1261
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1262
            is->video_st->codec->width, is->video_st->codec->height,
1263
            is->video_st->codec->pix_fmt,
1264
            is->video_st->codec->width, is->video_st->codec->height,
1265
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1266
        if (img_convert_ctx == NULL) {
1267
            fprintf(stderr, "Cannot initialize the conversion context\n");
1268
            exit(1);
1269
        }
1270
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1271
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1272
        /* update the bitmap content */
1273
        SDL_UnlockYUVOverlay(vp->bmp);
1274

    
1275
        vp->pts = pts;
1276

    
1277
        /* now we can update the picture count */
1278
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1279
            is->pictq_windex = 0;
1280
        SDL_LockMutex(is->pictq_mutex);
1281
        is->pictq_size++;
1282
        SDL_UnlockMutex(is->pictq_mutex);
1283
    }
1284
    return 0;
1285
}
1286

    
1287
/**
1288
 * compute the exact PTS for the picture if it is omitted in the stream
1289
 * @param pts1 the dts of the pkt / pts of the frame
1290
 */
1291
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1292
{
1293
    double frame_delay, pts;
1294

    
1295
    pts = pts1;
1296

    
1297
    if (pts != 0) {
1298
        /* update video clock with pts, if present */
1299
        is->video_clock = pts;
1300
    } else {
1301
        pts = is->video_clock;
1302
    }
1303
    /* update video clock for next frame */
1304
    frame_delay = av_q2d(is->video_st->codec->time_base);
1305
    /* for MPEG2, the frame can be repeated, so we update the
1306
       clock accordingly */
1307
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1308
    is->video_clock += frame_delay;
1309

    
1310
#if defined(DEBUG_SYNC) && 0
1311
    {
1312
        int ftype;
1313
        if (src_frame->pict_type == FF_B_TYPE)
1314
            ftype = 'B';
1315
        else if (src_frame->pict_type == FF_I_TYPE)
1316
            ftype = 'I';
1317
        else
1318
            ftype = 'P';
1319
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1320
               ftype, pts, pts1);
1321
    }
1322
#endif
1323
    return queue_picture(is, src_frame, pts);
1324
}
1325

    
1326
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1327

    
1328
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1329
    int ret= avcodec_default_get_buffer(c, pic);
1330
    uint64_t *pts= av_malloc(sizeof(uint64_t));
1331
    *pts= global_video_pkt_pts;
1332
    pic->opaque= pts;
1333
    return ret;
1334
}
1335

    
1336
static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1337
    if(pic) av_freep(&pic->opaque);
1338
    avcodec_default_release_buffer(c, pic);
1339
}
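/* These two callbacks attach the pts of the packet currently being decoded
   to each frame: video_thread() stores it in global_video_pkt_pts just
   before avcodec_decode_video(), my_get_buffer() copies it into pic->opaque
   when the decoder allocates the frame, and video_thread() reads it back so
   that frames reordered by the decoder keep the pts of the packet in which
   they started. */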
1340

    
1341
static int video_thread(void *arg)
1342
{
1343
    VideoState *is = arg;
1344
    AVPacket pkt1, *pkt = &pkt1;
1345
    int len1, got_picture;
1346
    AVFrame *frame= avcodec_alloc_frame();
1347
    double pts;
1348

    
1349
    for(;;) {
1350
        while (is->paused && !is->videoq.abort_request) {
1351
            SDL_Delay(10);
1352
        }
1353
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1354
            break;
1355

    
1356
        if(pkt->data == flush_pkt.data){
1357
            avcodec_flush_buffers(is->video_st->codec);
1358
            continue;
1359
        }
1360

    
1361
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1362
           this packet, if any */
1363
        global_video_pkt_pts= pkt->pts;
1364
        len1 = avcodec_decode_video(is->video_st->codec,
1365
                                    frame, &got_picture,
1366
                                    pkt->data, pkt->size);
1367

    
1368
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1369
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1370
            pts= *(uint64_t*)frame->opaque;
1371
        else if(pkt->dts != AV_NOPTS_VALUE)
1372
            pts= pkt->dts;
1373
        else
1374
            pts= 0;
1375
        pts *= av_q2d(is->video_st->time_base);
1376

    
1377
//            if (len1 < 0)
1378
//                break;
1379
        if (got_picture) {
1380
            if (output_picture2(is, frame, pts) < 0)
1381
                goto the_end;
1382
        }
1383
        av_free_packet(pkt);
1384
        if (step)
1385
            if (cur_stream)
1386
                stream_pause(cur_stream);
1387
    }
1388
 the_end:
1389
    av_free(frame);
1390
    return 0;
1391
}
1392

    
1393
static int subtitle_thread(void *arg)
1394
{
1395
    VideoState *is = arg;
1396
    SubPicture *sp;
1397
    AVPacket pkt1, *pkt = &pkt1;
1398
    int len1, got_subtitle;
1399
    double pts;
1400
    int i, j;
1401
    int r, g, b, y, u, v, a;
1402

    
1403
    for(;;) {
1404
        while (is->paused && !is->subtitleq.abort_request) {
1405
            SDL_Delay(10);
1406
        }
1407
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1408
            break;
1409

    
1410
        if(pkt->data == flush_pkt.data){
1411
            avcodec_flush_buffers(is->subtitle_st->codec);
1412
            continue;
1413
        }
1414
        SDL_LockMutex(is->subpq_mutex);
1415
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1416
               !is->subtitleq.abort_request) {
1417
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1418
        }
1419
        SDL_UnlockMutex(is->subpq_mutex);
1420

    
1421
        if (is->subtitleq.abort_request)
1422
            goto the_end;
1423

    
1424
        sp = &is->subpq[is->subpq_windex];
1425

    
1426
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1427
           this packet, if any */
1428
        pts = 0;
1429
        if (pkt->pts != AV_NOPTS_VALUE)
1430
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1431

    
1432
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1433
                                    &sp->sub, &got_subtitle,
1434
                                    pkt->data, pkt->size);
1435
//            if (len1 < 0)
1436
//                break;
1437
        if (got_subtitle && sp->sub.format == 0) {
1438
            sp->pts = pts;
1439

    
1440
            for (i = 0; i < sp->sub.num_rects; i++)
1441
            {
1442
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1443
                {
1444
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1445
                    y = RGB_TO_Y_CCIR(r, g, b);
1446
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1447
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1448
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1449
                }
1450
            }
1451

    
1452
            /* now we can update the picture count */
1453
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1454
                is->subpq_windex = 0;
1455
            SDL_LockMutex(is->subpq_mutex);
1456
            is->subpq_size++;
1457
            SDL_UnlockMutex(is->subpq_mutex);
1458
        }
1459
        av_free_packet(pkt);
1460
//        if (step)
1461
//            if (cur_stream)
1462
//                stream_pause(cur_stream);
1463
    }
1464
 the_end:
1465
    return 0;
1466
}
1467

    
1468
/* copy samples for viewing in editor window */
1469
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1470
{
1471
    int size, len, channels;
1472

    
1473
    channels = is->audio_st->codec->channels;
1474

    
1475
    size = samples_size / sizeof(short);
1476
    while (size > 0) {
1477
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1478
        if (len > size)
1479
            len = size;
1480
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1481
        samples += len;
1482
        is->sample_array_index += len;
1483
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1484
            is->sample_array_index = 0;
1485
        size -= len;
1486
    }
1487
}
1488

    
1489
/* return the new audio buffer size (samples can be added or deleted
1490
   to get better sync if video or external master clock) */
1491
static int synchronize_audio(VideoState *is, short *samples,
1492
                             int samples_size1, double pts)
1493
{
1494
    int n, samples_size;
1495
    double ref_clock;
1496

    
1497
    n = 2 * is->audio_st->codec->channels;
1498
    samples_size = samples_size1;
1499

    
1500
    /* if not master, then we try to remove or add samples to correct the clock */
1501
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1502
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1503
        double diff, avg_diff;
1504
        int wanted_size, min_size, max_size, nb_samples;
1505

    
1506
        ref_clock = get_master_clock(is);
1507
        diff = get_audio_clock(is) - ref_clock;
1508

    
1509
        if (diff < AV_NOSYNC_THRESHOLD) {
1510
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1511
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1512
                /* not enough measures to have a correct estimate */
1513
                is->audio_diff_avg_count++;
1514
            } else {
1515
                /* estimate the A-V difference */
1516
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1517

    
1518
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1519
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1520
                    nb_samples = samples_size / n;
1521

    
1522
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1523
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1524
                    if (wanted_size < min_size)
1525
                        wanted_size = min_size;
1526
                    else if (wanted_size > max_size)
1527
                        wanted_size = max_size;
1528

    
1529
                    /* add or remove samples to correct the synchronization */
1530
                    if (wanted_size < samples_size) {
1531
                        /* remove samples */
1532
                        samples_size = wanted_size;
1533
                    } else if (wanted_size > samples_size) {
1534
                        uint8_t *samples_end, *q;
1535
                        int nb;
1536

    
1537
                        /* add samples */
1538
                        nb = (wanted_size - samples_size); /* bytes to append by repeating the last sample */
1539
                        samples_end = (uint8_t *)samples + samples_size - n;
1540
                        q = samples_end + n;
1541
                        while (nb > 0) {
1542
                            memcpy(q, samples_end, n);
1543
                            q += n;
1544
                            nb -= n;
1545
                        }
1546
                        samples_size = wanted_size;
1547
                    }
1548
                }
1549
#if 0
1550
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1551
                       diff, avg_diff, samples_size - samples_size1,
1552
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1553
#endif
1554
            }
1555
        } else {
1556
            /* too big difference : may be initial PTS errors, so
1557
               reset A-V filter */
1558
            is->audio_diff_avg_count = 0;
1559
            is->audio_diff_cum = 0;
1560
        }
1561
    }
1562

    
1563
    return samples_size;
1564
}
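/* The A-V drift is smoothed with a geometric series: audio_diff_cum holds
   sum(diff_i * coef^i), so multiplying by (1 - coef) yields a weighted
   average over roughly the last AUDIO_DIFF_AVG_NB measurements. Samples are
   only dropped or duplicated once that average exceeds
   audio_diff_threshold, and never by more than
   SAMPLE_CORRECTION_PERCENT_MAX percent of one buffer. */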
1565

    
1566
/* decode one audio frame and return its uncompressed size */
1567
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1568
{
1569
    AVPacket *pkt = &is->audio_pkt;
1570
    int n, len1, data_size;
1571
    double pts;
1572

    
1573
    for(;;) {
1574
        /* NOTE: the audio packet can contain several frames */
1575
        while (is->audio_pkt_size > 0) {
1576
            data_size = buf_size;
1577
            len1 = avcodec_decode_audio2(is->audio_st->codec,
1578
                                        (int16_t *)audio_buf, &data_size,
1579
                                        is->audio_pkt_data, is->audio_pkt_size);
1580
            if (len1 < 0) {
1581
                /* if error, we skip the frame */
1582
                is->audio_pkt_size = 0;
1583
                break;
1584
            }
1585

    
1586
            is->audio_pkt_data += len1;
1587
            is->audio_pkt_size -= len1;
1588
            if (data_size <= 0)
1589
                continue;
1590
            /* if no pts, then compute it */
1591
            pts = is->audio_clock;
1592
            *pts_ptr = pts;
1593
            n = 2 * is->audio_st->codec->channels;
1594
            is->audio_clock += (double)data_size /
1595
                (double)(n * is->audio_st->codec->sample_rate);
1596
#if defined(DEBUG_SYNC)
1597
            {
1598
                static double last_clock;
1599
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1600
                       is->audio_clock - last_clock,
1601
                       is->audio_clock, pts);
1602
                last_clock = is->audio_clock;
1603
            }
1604
#endif
1605
            return data_size;
1606
        }
1607

    
1608
        /* free the current packet */
1609
        if (pkt->data)
1610
            av_free_packet(pkt);
1611

    
1612
        if (is->paused || is->audioq.abort_request) {
1613
            return -1;
1614
        }
1615

    
1616
        /* read next packet */
1617
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1618
            return -1;
1619
        if(pkt->data == flush_pkt.data){
1620
            avcodec_flush_buffers(is->audio_st->codec);
1621
            continue;
1622
        }
1623

    
1624
        is->audio_pkt_data = pkt->data;
1625
        is->audio_pkt_size = pkt->size;
1626

    
1627
        /* if update the audio clock with the pts */
1628
        if (pkt->pts != AV_NOPTS_VALUE) {
1629
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1630
        }
1631
    }
1632
}
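
/* Worked example (assumed values, for illustration only): for 16-bit
   stereo audio at 44100 Hz, n = 2 * 2 = 4 bytes per sample pair, so a
   decoded data_size of 4096 bytes advances is->audio_clock by
       4096 / (4 * 44100) ~= 0.023 s
   i.e. about 23 ms of audio per decoded frame. */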

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
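
/* Illustrative note (assumed values): SDL pulls audio through this
   callback. With SDL_AUDIO_BUFFER_SIZE = 1024 samples of 16-bit stereo,
   each call asks for len = 1024 * 4 = 4096 bytes; at 44100 Hz that is
   roughly 23 ms of audio, which is why the synchronize_audio() correction
   is applied once per callback on a frame of about that size. */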

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        enc->get_buffer= my_get_buffer;
        enc->release_buffer= my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
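
/* Worked example (assumed values, for illustration only): with
   SDL_AUDIO_BUFFER_SIZE = 1024 and a 44100 Hz stream, the threshold set
   above is
       audio_diff_threshold = 2.0 * 1024 / 44100 ~= 0.046 s
   so A/V drift smaller than about 46 ms is handled by the averaging
   filter instead of triggering sample insertion or removal. */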

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    if (s->track != 0)
        fprintf(stderr, "Track: %d\n", s->track);
    if (s->title[0] != '\0')
        fprintf(stderr, "Title: %s\n", s->title);
    if (s->author[0] != '\0')
        fprintf(stderr, "Author: %s\n", s->author);
    if (s->copyright[0] != '\0')
        fprintf(stderr, "Copyright: %s\n", s->copyright);
    if (s->comment[0] != '\0')
        fprintf(stderr, "Comment: %s\n", s->comment);
    if (s->album[0] != '\0')
        fprintf(stderr, "Album: %s\n", s->album);
    if (s->year != 0)
        fprintf(stderr, "Year: %d\n", s->year);
    if (s->genre[0] != '\0')
        fprintf(stderr, "Genre: %s\n", s->genre);
}
1859

    
1860
/* since we have only one decoding thread, we can use a global
1861
   variable instead of a thread local variable */
1862
static VideoState *global_video_state;
1863

    
1864
static int decode_interrupt_cb(void)
1865
{
1866
    return (global_video_state && global_video_state->abort_request);
1867
}

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
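
/* Worked example (assumed values, for illustration only): is->seek_pos is
   expressed in AV_TIME_BASE units (microseconds). For a seek request of
   10 seconds into a stream whose time_base is 1/90000 (typical for
   MPEG-TS), the rescale above gives
       av_rescale_q(10000000, AV_TIME_BASE_Q, (AVRational){1, 90000}) = 900000
   i.e. 10 s * 90000 ticks/s, which is the unit av_seek_frame() expects
   for that stream. */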

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
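
/* Worked example (assumed values, for illustration only): a click at
   x = 320 on an 800-pixel wide window over a 2-hour file gives
   frac = 320 / 800 = 0.4, so the seek target is
   start_time + 0.4 * duration, i.e. 48 minutes in, and the message above
   reports roughly "Seek to 40% ( 0:48:00) of total duration ( 2:00:00)". */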

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static void opt_width(const char *arg)
{
    screen_width = atoi(arg);
    if(screen_width<=0){
        fprintf(stderr, "invalid width\n");
        exit(1);
    }
}

static void opt_height(const char *arg)
{
    screen_height = atoi(arg);
    if(screen_height<=0){
        fprintf(stderr, "invalid height\n");
        exit(1);
    }
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

#ifdef CONFIG_RTSP_DEMUXER
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif

static void opt_sync(const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        show_help();
        exit(1);
    }
}

static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}

static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}

static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}

static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}

static void opt_show_help(void)
{
    show_help();
    exit(0);
}

const OptionDef options[] = {
    { "h", 0, {(void*)opt_show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_RTSP_DEMUXER
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};
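
/* Usage sketch (illustrative; file names are hypothetical): the table
   above is consumed by parse_options(), so for instance
       ffplay -x 640 -y 480 -sync audio -threads 2 movie.avi
   forces a 640x480 window, audio-master A/V sync and two decoding
   threads, while
       ffplay -f mpegts -ss 00:01:30 capture.ts
   forces the MPEG-TS demuxer and starts playback 90 seconds in. */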

void show_help(void)
{
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
           "usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demuxers and protocols */
    av_register_all();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_help();
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}