ffmpeg / ffplay.c @ c482500f

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "avformat.h"
#include "avdevice.h"
#include "rtsp.h"
#include "swscale.h"
#include "avstring.h"

#include "version.h"
#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

static const char program_name[] = "FFplay";
static const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

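/* flags for the libswscale context used in queue_picture() to convert
   decoded frames to the YV12 overlay format */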
static int sws_flags = SWS_BICUBIC;

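/* thread-safe FIFO of demuxed AVPackets, filled by the demuxer and
   drained by a decoder thread */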
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

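/* global state for the currently opened stream: decoder threads,
   packet/picture queues and the clocks used for A/V synchronisation */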
typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

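/* sentinel packet: a decoder thread that dequeues a packet whose data
   pointer matches flush_pkt.data flushes its codec buffers instead of
   decoding it */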
AVPacket flush_pkt;

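/* custom SDL user events: FF_ALLOC_EVENT asks the main thread to
   (re)allocate the video overlay, FF_REFRESH_EVENT triggers a display
   refresh and FF_QUIT_EVENT asks the event loop to quit */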
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
218
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
219
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
220

    
221
SDL_Surface *screen;
222

    
223
/* packet queue handling */
224
static void packet_queue_init(PacketQueue *q)
225
{
226
    memset(q, 0, sizeof(PacketQueue));
227
    q->mutex = SDL_CreateMutex();
228
    q->cond = SDL_CreateCond();
229
}
230

    
231
static void packet_queue_flush(PacketQueue *q)
232
{
233
    AVPacketList *pkt, *pkt1;
234

    
235
    SDL_LockMutex(q->mutex);
236
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
237
        pkt1 = pkt->next;
238
        av_free_packet(&pkt->pkt);
239
        av_freep(&pkt);
240
    }
241
    q->last_pkt = NULL;
242
    q->first_pkt = NULL;
243
    q->nb_packets = 0;
244
    q->size = 0;
245
    SDL_UnlockMutex(q->mutex);
246
}
247

    
248
static void packet_queue_end(PacketQueue *q)
249
{
250
    packet_queue_flush(q);
251
    SDL_DestroyMutex(q->mutex);
252
    SDL_DestroyCond(q->cond);
253
}
254

    
255
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
256
{
257
    AVPacketList *pkt1;
258

    
259
    /* duplicate the packet */
260
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
261
        return -1;
262

    
263
    pkt1 = av_malloc(sizeof(AVPacketList));
264
    if (!pkt1)
265
        return -1;
266
    pkt1->pkt = *pkt;
267
    pkt1->next = NULL;
268

    
269

    
270
    SDL_LockMutex(q->mutex);
271

    
272
    if (!q->last_pkt)
273

    
274
        q->first_pkt = pkt1;
275
    else
276
        q->last_pkt->next = pkt1;
277
    q->last_pkt = pkt1;
278
    q->nb_packets++;
279
    q->size += pkt1->pkt.size;
280
    /* XXX: should duplicate packet data in DV case */
281
    SDL_CondSignal(q->cond);
282

    
283
    SDL_UnlockMutex(q->mutex);
284
    return 0;
285
}
286

    
287
static void packet_queue_abort(PacketQueue *q)
288
{
289
    SDL_LockMutex(q->mutex);
290

    
291
    q->abort_request = 1;
292

    
293
    SDL_CondSignal(q->cond);
294

    
295
    SDL_UnlockMutex(q->mutex);
296
}
297

    
298
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
299
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
300
{
301
    AVPacketList *pkt1;
302
    int ret;
303

    
304
    SDL_LockMutex(q->mutex);
305

    
306
    for(;;) {
307
        if (q->abort_request) {
308
            ret = -1;
309
            break;
310
        }
311

    
312
        pkt1 = q->first_pkt;
313
        if (pkt1) {
314
            q->first_pkt = pkt1->next;
315
            if (!q->first_pkt)
316
                q->last_pkt = NULL;
317
            q->nb_packets--;
318
            q->size -= pkt1->pkt.size;
319
            *pkt = pkt1->pkt;
320
            av_free(pkt1);
321
            ret = 1;
322
            break;
323
        } else if (!block) {
324
            ret = 0;
325
            break;
326
        } else {
327
            SDL_CondWait(q->cond, q->mutex);
328
        }
329
    }
330
    SDL_UnlockMutex(q->mutex);
331
    return ret;
332
}
333

    
334
static inline void fill_rectangle(SDL_Surface *screen,
335
                                  int x, int y, int w, int h, int color)
336
{
337
    SDL_Rect rect;
338
    rect.x = x;
339
    rect.y = y;
340
    rect.w = w;
341
    rect.h = h;
342
    SDL_FillRect(screen, &rect, color);
343
}
344

    
345
#if 0
346
/* draw only the border of a rectangle */
347
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
348
{
349
    int w1, w2, h1, h2;
350

351
    /* fill the background */
352
    w1 = x;
353
    if (w1 < 0)
354
        w1 = 0;
355
    w2 = s->width - (x + w);
356
    if (w2 < 0)
357
        w2 = 0;
358
    h1 = y;
359
    if (h1 < 0)
360
        h1 = 0;
361
    h2 = s->height - (y + h);
362
    if (h2 < 0)
363
        h2 = 0;
364
    fill_rectangle(screen,
365
                   s->xleft, s->ytop,
366
                   w1, s->height,
367
                   color);
368
    fill_rectangle(screen,
369
                   s->xleft + s->width - w2, s->ytop,
370
                   w2, s->height,
371
                   color);
372
    fill_rectangle(screen,
373
                   s->xleft + w1, s->ytop,
374
                   s->width - w1 - w2, h1,
375
                   color);
376
    fill_rectangle(screen,
377
                   s->xleft + w1, s->ytop + s->height - h2,
378
                   s->width - w1 - w2, h2,
379
                   color);
380
}
381
#endif
382

    
383

    
384

    
385
#define SCALEBITS 10
386
#define ONE_HALF  (1 << (SCALEBITS - 1))
387
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
388

    
389
#define RGB_TO_Y_CCIR(r, g, b) \
390
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
391
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
392

    
393
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
394
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
395
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
396

    
397
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
398
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
399
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
400

    
401
#define ALPHA_BLEND(a, oldp, newp, s)\
402
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
403

    
404
#define RGBA_IN(r, g, b, a, s)\
405
{\
406
    unsigned int v = ((const uint32_t *)(s))[0];\
407
    a = (v >> 24) & 0xff;\
408
    r = (v >> 16) & 0xff;\
409
    g = (v >> 8) & 0xff;\
410
    b = v & 0xff;\
411
}
412

    
413
#define YUVA_IN(y, u, v, a, s, pal)\
414
{\
415
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
416
    a = (val >> 24) & 0xff;\
417
    y = (val >> 16) & 0xff;\
418
    u = (val >> 8) & 0xff;\
419
    v = val & 0xff;\
420
}
421

    
422
#define YUVA_OUT(d, y, u, v, a)\
423
{\
424
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
425
}
426

    
427

    
428
#define BPP 1
429

    
430
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
431
{
432
    int wrap, wrap3, width2, skip2;
433
    int y, u, v, a, u1, v1, a1, w, h;
434
    uint8_t *lum, *cb, *cr;
435
    const uint8_t *p;
436
    const uint32_t *pal;
437
    int dstx, dsty, dstw, dsth;
438

    
439
    dstx = FFMIN(FFMAX(rect->x, 0), imgw);
440
    dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
441
    dsty = FFMIN(FFMAX(rect->y, 0), imgh);
442
    dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
443
    lum = dst->data[0] + dsty * dst->linesize[0];
444
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
445
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
446

    
447
    width2 = (dstw + 1) >> 1;
448
    skip2 = dstx >> 1;
449
    wrap = dst->linesize[0];
450
    wrap3 = rect->linesize;
451
    p = rect->bitmap;
452
    pal = rect->rgba_palette;  /* Now in YCrCb! */
453

    
454
    if (dsty & 1) {
455
        lum += dstx;
456
        cb += skip2;
457
        cr += skip2;
458

    
459
        if (dstx & 1) {
460
            YUVA_IN(y, u, v, a, p, pal);
461
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464
            cb++;
465
            cr++;
466
            lum++;
467
            p += BPP;
468
        }
469
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
470
            YUVA_IN(y, u, v, a, p, pal);
471
            u1 = u;
472
            v1 = v;
473
            a1 = a;
474
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475

    
476
            YUVA_IN(y, u, v, a, p + BPP, pal);
477
            u1 += u;
478
            v1 += v;
479
            a1 += a;
480
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483
            cb++;
484
            cr++;
485
            p += 2 * BPP;
486
            lum += 2;
487
        }
488
        if (w) {
489
            YUVA_IN(y, u, v, a, p, pal);
490
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493
        }
494
        p += wrap3 + (wrap3 - dstw * BPP);
495
        lum += wrap + (wrap - dstw - dstx);
496
        cb += dst->linesize[1] - width2 - skip2;
497
        cr += dst->linesize[2] - width2 - skip2;
498
    }
499
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
500
        lum += dstx;
501
        cb += skip2;
502
        cr += skip2;
503

    
504
        if (dstx & 1) {
505
            YUVA_IN(y, u, v, a, p, pal);
506
            u1 = u;
507
            v1 = v;
508
            a1 = a;
509
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510
            p += wrap3;
511
            lum += wrap;
512
            YUVA_IN(y, u, v, a, p, pal);
513
            u1 += u;
514
            v1 += v;
515
            a1 += a;
516
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
518
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
519
            cb++;
520
            cr++;
521
            p += -wrap3 + BPP;
522
            lum += -wrap + 1;
523
        }
524
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
525
            YUVA_IN(y, u, v, a, p, pal);
526
            u1 = u;
527
            v1 = v;
528
            a1 = a;
529
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530

    
531
            YUVA_IN(y, u, v, a, p, pal);
532
            u1 += u;
533
            v1 += v;
534
            a1 += a;
535
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
536
            p += wrap3;
537
            lum += wrap;
538

    
539
            YUVA_IN(y, u, v, a, p, pal);
540
            u1 += u;
541
            v1 += v;
542
            a1 += a;
543
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544

    
545
            YUVA_IN(y, u, v, a, p, pal);
546
            u1 += u;
547
            v1 += v;
548
            a1 += a;
549
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550

    
551
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
552
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
553

    
554
            cb++;
555
            cr++;
556
            p += -wrap3 + 2 * BPP;
557
            lum += -wrap + 2;
558
        }
559
        if (w) {
560
            YUVA_IN(y, u, v, a, p, pal);
561
            u1 = u;
562
            v1 = v;
563
            a1 = a;
564
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565
            p += wrap3;
566
            lum += wrap;
567
            YUVA_IN(y, u, v, a, p, pal);
568
            u1 += u;
569
            v1 += v;
570
            a1 += a;
571
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574
            cb++;
575
            cr++;
576
            p += -wrap3 + BPP;
577
            lum += -wrap + 1;
578
        }
579
        p += wrap3 + (wrap3 - dstw * BPP);
580
        lum += wrap + (wrap - dstw - dstx);
581
        cb += dst->linesize[1] - width2 - skip2;
582
        cr += dst->linesize[2] - width2 - skip2;
583
    }
584
    /* handle odd height */
585
    if (h) {
586
        lum += dstx;
587
        cb += skip2;
588
        cr += skip2;
589

    
590
        if (dstx & 1) {
591
            YUVA_IN(y, u, v, a, p, pal);
592
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
594
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
595
            cb++;
596
            cr++;
597
            lum++;
598
            p += BPP;
599
        }
600
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
601
            YUVA_IN(y, u, v, a, p, pal);
602
            u1 = u;
603
            v1 = v;
604
            a1 = a;
605
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606

    
607
            YUVA_IN(y, u, v, a, p + BPP, pal);
608
            u1 += u;
609
            v1 += v;
610
            a1 += a;
611
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
612
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
613
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
614
            cb++;
615
            cr++;
616
            p += 2 * BPP;
617
            lum += 2;
618
        }
619
        if (w) {
620
            YUVA_IN(y, u, v, a, p, pal);
621
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
623
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
624
        }
625
    }
626
}
627

    
628
static void free_subpicture(SubPicture *sp)
629
{
630
    int i;
631

    
632
    for (i = 0; i < sp->sub.num_rects; i++)
633
    {
634
        av_free(sp->sub.rects[i].bitmap);
635
        av_free(sp->sub.rects[i].rgba_palette);
636
    }
637

    
638
    av_free(sp->sub.rects);
639

    
640
    memset(&sp->sub, 0, sizeof(AVSubtitle));
641
}
642

    
643
static void video_image_display(VideoState *is)
644
{
645
    VideoPicture *vp;
646
    SubPicture *sp;
647
    AVPicture pict;
648
    float aspect_ratio;
649
    int width, height, x, y;
650
    SDL_Rect rect;
651
    int i;
652

    
653
    vp = &is->pictq[is->pictq_rindex];
654
    if (vp->bmp) {
655
        /* XXX: use variable in the frame */
656
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
657
            aspect_ratio = 0;
658
        else
659
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
660
                * is->video_st->codec->width / is->video_st->codec->height;;
661
        if (aspect_ratio <= 0.0)
662
            aspect_ratio = (float)is->video_st->codec->width /
663
                (float)is->video_st->codec->height;
664
        /* if an active format is indicated, then it overrides the
665
           mpeg format */
666
#if 0
667
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
668
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
669
            printf("dtg_active_format=%d\n", is->dtg_active_format);
670
        }
671
#endif
672
#if 0
673
        switch(is->video_st->codec->dtg_active_format) {
674
        case FF_DTG_AFD_SAME:
675
        default:
676
            /* nothing to do */
677
            break;
678
        case FF_DTG_AFD_4_3:
679
            aspect_ratio = 4.0 / 3.0;
680
            break;
681
        case FF_DTG_AFD_16_9:
682
            aspect_ratio = 16.0 / 9.0;
683
            break;
684
        case FF_DTG_AFD_14_9:
685
            aspect_ratio = 14.0 / 9.0;
686
            break;
687
        case FF_DTG_AFD_4_3_SP_14_9:
688
            aspect_ratio = 14.0 / 9.0;
689
            break;
690
        case FF_DTG_AFD_16_9_SP_14_9:
691
            aspect_ratio = 14.0 / 9.0;
692
            break;
693
        case FF_DTG_AFD_SP_4_3:
694
            aspect_ratio = 4.0 / 3.0;
695
            break;
696
        }
697
#endif
698

    
699
        if (is->subtitle_st)
700
        {
701
            if (is->subpq_size > 0)
702
            {
703
                sp = &is->subpq[is->subpq_rindex];
704

    
705
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
706
                {
707
                    SDL_LockYUVOverlay (vp->bmp);
708

    
709
                    pict.data[0] = vp->bmp->pixels[0];
710
                    pict.data[1] = vp->bmp->pixels[2];
711
                    pict.data[2] = vp->bmp->pixels[1];
712

    
713
                    pict.linesize[0] = vp->bmp->pitches[0];
714
                    pict.linesize[1] = vp->bmp->pitches[2];
715
                    pict.linesize[2] = vp->bmp->pitches[1];
716

    
717
                    for (i = 0; i < sp->sub.num_rects; i++)
718
                        blend_subrect(&pict, &sp->sub.rects[i],
719
                                      vp->bmp->w, vp->bmp->h);
720

    
721
                    SDL_UnlockYUVOverlay (vp->bmp);
722
                }
723
            }
724
        }
725

    
726

    
727
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
728
        height = is->height;
729
        width = ((int)rint(height * aspect_ratio)) & -3;
730
        if (width > is->width) {
731
            width = is->width;
732
            height = ((int)rint(width / aspect_ratio)) & -3;
733
        }
734
        x = (is->width - width) / 2;
735
        y = (is->height - height) / 2;
736
        if (!is->no_background) {
737
            /* fill the background */
738
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
739
        } else {
740
            is->no_background = 0;
741
        }
742
        rect.x = is->xleft + x;
743
        rect.y = is->ytop  + y;
744
        rect.w = width;
745
        rect.h = height;
746
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
747
    } else {
748
#if 0
749
        fill_rectangle(screen,
750
                       is->xleft, is->ytop, is->width, is->height,
751
                       QERGB(0x00, 0x00, 0x00));
752
#endif
753
    }
754
}
755

    
756
static inline int compute_mod(int a, int b)
757
{
758
    a = a % b;
759
    if (a >= 0)
760
        return a;
761
    else
762
        return a + b;
763
}
764

    
765
static void video_audio_display(VideoState *s)
766
{
767
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
768
    int ch, channels, h, h2, bgcolor, fgcolor;
769
    int16_t time_diff;
770

    
771
    /* compute display index : center on currently output samples */
772
    channels = s->audio_st->codec->channels;
773
    nb_display_channels = channels;
774
    if (!s->paused) {
775
        n = 2 * channels;
776
        delay = audio_write_get_buf_size(s);
777
        delay /= n;
778

    
779
        /* to be more precise, we take into account the time spent since
780
           the last buffer computation */
781
        if (audio_callback_time) {
782
            time_diff = av_gettime() - audio_callback_time;
783
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
784
        }
785

    
786
        delay -= s->width / 2;
787
        if (delay < s->width)
788
            delay = s->width;
789

    
790
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
791

    
792
        h= INT_MIN;
793
        for(i=0; i<1000; i+=channels){
794
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
795
            int a= s->sample_array[idx];
796
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
797
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
798
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
799
            int score= a-d;
800
            if(h<score && (b^c)<0){
801
                h= score;
802
                i_start= idx;
803
            }
804
        }
805

    
806
        s->last_i_start = i_start;
807
    } else {
808
        i_start = s->last_i_start;
809
    }
810

    
811
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
812
    fill_rectangle(screen,
813
                   s->xleft, s->ytop, s->width, s->height,
814
                   bgcolor);
815

    
816
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
817

    
818
    /* total height for one channel */
819
    h = s->height / nb_display_channels;
820
    /* graph height / 2 */
821
    h2 = (h * 9) / 20;
822
    for(ch = 0;ch < nb_display_channels; ch++) {
823
        i = i_start + ch;
824
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
825
        for(x = 0; x < s->width; x++) {
826
            y = (s->sample_array[i] * h2) >> 15;
827
            if (y < 0) {
828
                y = -y;
829
                ys = y1 - y;
830
            } else {
831
                ys = y1;
832
            }
833
            fill_rectangle(screen,
834
                           s->xleft + x, ys, 1, y,
835
                           fgcolor);
836
            i += channels;
837
            if (i >= SAMPLE_ARRAY_SIZE)
838
                i -= SAMPLE_ARRAY_SIZE;
839
        }
840
    }
841

    
842
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
843

    
844
    for(ch = 1;ch < nb_display_channels; ch++) {
845
        y = s->ytop + ch * h;
846
        fill_rectangle(screen,
847
                       s->xleft, y, s->width, 1,
848
                       fgcolor);
849
    }
850
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
851
}
852

    
853
static int video_open(VideoState *is){
854
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
855
    int w,h;
856

    
857
    if(is_full_screen) flags |= SDL_FULLSCREEN;
858
    else               flags |= SDL_RESIZABLE;
859

    
860
    if (is_full_screen && fs_screen_width) {
861
        w = fs_screen_width;
862
        h = fs_screen_height;
863
    } else if(!is_full_screen && screen_width){
864
        w = screen_width;
865
        h = screen_height;
866
    }else if (is->video_st && is->video_st->codec->width){
867
        w = is->video_st->codec->width;
868
        h = is->video_st->codec->height;
869
    } else {
870
        w = 640;
871
        h = 480;
872
    }
873
#ifndef __APPLE__
874
    screen = SDL_SetVideoMode(w, h, 0, flags);
875
#else
876
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
877
    screen = SDL_SetVideoMode(w, h, 24, flags);
878
#endif
879
    if (!screen) {
880
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
881
        return -1;
882
    }
883
    SDL_WM_SetCaption("FFplay", "FFplay");
884

    
885
    is->width = screen->w;
886
    is->height = screen->h;
887

    
888
    return 0;
889
}
890

    
891
/* display the current picture, if any */
892
static void video_display(VideoState *is)
893
{
894
    if(!screen)
895
        video_open(cur_stream);
896
    if (is->audio_st && is->show_audio)
897
        video_audio_display(is);
898
    else if (is->video_st)
899
        video_image_display(is);
900
}
901

    
902
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
903
{
904
    SDL_Event event;
905
    event.type = FF_REFRESH_EVENT;
906
    event.user.data1 = opaque;
907
    SDL_PushEvent(&event);
908
    return 0; /* 0 means stop timer */
909
}
910

    
911
/* schedule a video refresh in 'delay' ms */
912
static void schedule_refresh(VideoState *is, int delay)
913
{
914
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
915
}
916

    
917
/* get the current audio clock value */
918
static double get_audio_clock(VideoState *is)
919
{
920
    double pts;
921
    int hw_buf_size, bytes_per_sec;
922
    pts = is->audio_clock;
923
    hw_buf_size = audio_write_get_buf_size(is);
924
    bytes_per_sec = 0;
925
    if (is->audio_st) {
926
        bytes_per_sec = is->audio_st->codec->sample_rate *
927
            2 * is->audio_st->codec->channels;
928
    }
929
    if (bytes_per_sec)
930
        pts -= (double)hw_buf_size / bytes_per_sec;
931
    return pts;
932
}
933

    
934
/* get the current video clock value */
935
static double get_video_clock(VideoState *is)
936
{
937
    double delta;
938
    if (is->paused) {
939
        delta = 0;
940
    } else {
941
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
942
    }
943
    return is->video_current_pts + delta;
944
}
945

    
946
/* get the current external clock value */
947
static double get_external_clock(VideoState *is)
948
{
949
    int64_t ti;
950
    ti = av_gettime();
951
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
952
}
953

    
954
/* get the current master clock value */
955
static double get_master_clock(VideoState *is)
956
{
957
    double val;
958

    
959
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
960
        if (is->video_st)
961
            val = get_video_clock(is);
962
        else
963
            val = get_audio_clock(is);
964
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
965
        if (is->audio_st)
966
            val = get_audio_clock(is);
967
        else
968
            val = get_video_clock(is);
969
    } else {
970
        val = get_external_clock(is);
971
    }
972
    return val;
973
}
974

    
975
/* seek in the stream */
976
static void stream_seek(VideoState *is, int64_t pos, int rel)
977
{
978
    if (!is->seek_req) {
979
        is->seek_pos = pos;
980
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
981
        if (seek_by_bytes)
982
            is->seek_flags |= AVSEEK_FLAG_BYTE;
983
        is->seek_req = 1;
984
    }
985
}
986

    
987
/* pause or resume the video */
988
static void stream_pause(VideoState *is)
989
{
990
    is->paused = !is->paused;
991
    if (!is->paused) {
992
        is->video_current_pts = get_video_clock(is);
993
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
994
    }
995
}
996

    
997
/* called to display each frame */
998
static void video_refresh_timer(void *opaque)
999
{
1000
    VideoState *is = opaque;
1001
    VideoPicture *vp;
1002
    double actual_delay, delay, sync_threshold, ref_clock, diff;
1003

    
1004
    SubPicture *sp, *sp2;
1005

    
1006
    if (is->video_st) {
1007
        if (is->pictq_size == 0) {
1008
            /* if no picture, need to wait */
1009
            schedule_refresh(is, 1);
1010
        } else {
1011
            /* dequeue the picture */
1012
            vp = &is->pictq[is->pictq_rindex];
1013

    
1014
            /* update current video pts */
1015
            is->video_current_pts = vp->pts;
1016
            is->video_current_pts_time = av_gettime();
1017

    
1018
            /* compute nominal delay */
1019
            delay = vp->pts - is->frame_last_pts;
1020
            if (delay <= 0 || delay >= 2.0) {
1021
                /* if incorrect delay, use previous one */
1022
                delay = is->frame_last_delay;
1023
            }
1024
            is->frame_last_delay = delay;
1025
            is->frame_last_pts = vp->pts;
1026

    
1027
            /* update delay to follow master synchronisation source */
1028
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1029
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1030
                /* if video is slave, we try to correct big delays by
1031
                   duplicating or deleting a frame */
1032
                ref_clock = get_master_clock(is);
1033
                diff = vp->pts - ref_clock;
1034

    
1035
                /* skip or repeat frame. We take into account the
1036
                   delay to compute the threshold. I still don't know
1037
                   if it is the best guess */
1038
                sync_threshold = AV_SYNC_THRESHOLD;
1039
                if (delay > sync_threshold)
1040
                    sync_threshold = delay;
1041
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1042
                    if (diff <= -sync_threshold)
1043
                        delay = 0;
1044
                    else if (diff >= sync_threshold)
1045
                        delay = 2 * delay;
1046
                }
1047
            }
1048

    
1049
            is->frame_timer += delay;
1050
            /* compute the REAL delay (we need to do that to avoid
1051
               long term errors */
1052
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1053
            if (actual_delay < 0.010) {
1054
                /* XXX: should skip picture */
1055
                actual_delay = 0.010;
1056
            }
1057
            /* launch timer for next picture */
1058
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1059

    
1060
#if defined(DEBUG_SYNC)
1061
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1062
                   delay, actual_delay, vp->pts, -diff);
1063
#endif
1064

    
1065
            if(is->subtitle_st) {
1066
                if (is->subtitle_stream_changed) {
1067
                    SDL_LockMutex(is->subpq_mutex);
1068

    
1069
                    while (is->subpq_size) {
1070
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1071

    
1072
                        /* update queue size and signal for next picture */
1073
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1074
                            is->subpq_rindex = 0;
1075

    
1076
                        is->subpq_size--;
1077
                    }
1078
                    is->subtitle_stream_changed = 0;
1079

    
1080
                    SDL_CondSignal(is->subpq_cond);
1081
                    SDL_UnlockMutex(is->subpq_mutex);
1082
                } else {
1083
                    if (is->subpq_size > 0) {
1084
                        sp = &is->subpq[is->subpq_rindex];
1085

    
1086
                        if (is->subpq_size > 1)
1087
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1088
                        else
1089
                            sp2 = NULL;
1090

    
1091
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1092
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1093
                        {
1094
                            free_subpicture(sp);
1095

    
1096
                            /* update queue size and signal for next picture */
1097
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1098
                                is->subpq_rindex = 0;
1099

    
1100
                            SDL_LockMutex(is->subpq_mutex);
1101
                            is->subpq_size--;
1102
                            SDL_CondSignal(is->subpq_cond);
1103
                            SDL_UnlockMutex(is->subpq_mutex);
1104
                        }
1105
                    }
1106
                }
1107
            }
1108

    
1109
            /* display picture */
1110
            video_display(is);
1111

    
1112
            /* update queue size and signal for next picture */
1113
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114
                is->pictq_rindex = 0;
1115

    
1116
            SDL_LockMutex(is->pictq_mutex);
1117
            is->pictq_size--;
1118
            SDL_CondSignal(is->pictq_cond);
1119
            SDL_UnlockMutex(is->pictq_mutex);
1120
        }
1121
    } else if (is->audio_st) {
1122
        /* draw the next audio frame */
1123

    
1124
        schedule_refresh(is, 40);
1125

    
1126
        /* if only audio stream, then display the audio bars (better
1127
           than nothing, just to test the implementation */
1128

    
1129
        /* display picture */
1130
        video_display(is);
1131
    } else {
1132
        schedule_refresh(is, 100);
1133
    }
1134
    if (show_status) {
1135
        static int64_t last_time;
1136
        int64_t cur_time;
1137
        int aqsize, vqsize, sqsize;
1138
        double av_diff;
1139

    
1140
        cur_time = av_gettime();
1141
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1142
            aqsize = 0;
1143
            vqsize = 0;
1144
            sqsize = 0;
1145
            if (is->audio_st)
1146
                aqsize = is->audioq.size;
1147
            if (is->video_st)
1148
                vqsize = is->videoq.size;
1149
            if (is->subtitle_st)
1150
                sqsize = is->subtitleq.size;
1151
            av_diff = 0;
1152
            if (is->audio_st && is->video_st)
1153
                av_diff = get_audio_clock(is) - get_video_clock(is);
1154
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1155
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1156
            fflush(stdout);
1157
            last_time = cur_time;
1158
        }
1159
    }
1160
}
1161

    
1162
/* allocate a picture (needs to do that in main thread to avoid
1163
   potential locking problems */
1164
static void alloc_picture(void *opaque)
1165
{
1166
    VideoState *is = opaque;
1167
    VideoPicture *vp;
1168

    
1169
    vp = &is->pictq[is->pictq_windex];
1170

    
1171
    if (vp->bmp)
1172
        SDL_FreeYUVOverlay(vp->bmp);
1173

    
1174
#if 0
1175
    /* XXX: use generic function */
1176
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
1177
    switch(is->video_st->codec->pix_fmt) {
1178
    case PIX_FMT_YUV420P:
1179
    case PIX_FMT_YUV422P:
1180
    case PIX_FMT_YUV444P:
1181
    case PIX_FMT_YUYV422:
1182
    case PIX_FMT_YUV410P:
1183
    case PIX_FMT_YUV411P:
1184
        is_yuv = 1;
1185
        break;
1186
    default:
1187
        is_yuv = 0;
1188
        break;
1189
    }
1190
#endif
1191
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1192
                                   is->video_st->codec->height,
1193
                                   SDL_YV12_OVERLAY,
1194
                                   screen);
1195
    vp->width = is->video_st->codec->width;
1196
    vp->height = is->video_st->codec->height;
1197

    
1198
    SDL_LockMutex(is->pictq_mutex);
1199
    vp->allocated = 1;
1200
    SDL_CondSignal(is->pictq_cond);
1201
    SDL_UnlockMutex(is->pictq_mutex);
1202
}
1203

    
1204
/**
1205
 *
1206
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1207
 */
1208
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1209
{
1210
    VideoPicture *vp;
1211
    int dst_pix_fmt;
1212
    AVPicture pict;
1213
    static struct SwsContext *img_convert_ctx;
1214

    
1215
    /* wait until we have space to put a new picture */
1216
    SDL_LockMutex(is->pictq_mutex);
1217
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1218
           !is->videoq.abort_request) {
1219
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1220
    }
1221
    SDL_UnlockMutex(is->pictq_mutex);
1222

    
1223
    if (is->videoq.abort_request)
1224
        return -1;
1225

    
1226
    vp = &is->pictq[is->pictq_windex];
1227

    
1228
    /* alloc or resize hardware picture buffer */
1229
    if (!vp->bmp ||
1230
        vp->width != is->video_st->codec->width ||
1231
        vp->height != is->video_st->codec->height) {
1232
        SDL_Event event;
1233

    
1234
        vp->allocated = 0;
1235

    
1236
        /* the allocation must be done in the main thread to avoid
1237
           locking problems */
1238
        event.type = FF_ALLOC_EVENT;
1239
        event.user.data1 = is;
1240
        SDL_PushEvent(&event);
1241

    
1242
        /* wait until the picture is allocated */
1243
        SDL_LockMutex(is->pictq_mutex);
1244
        while (!vp->allocated && !is->videoq.abort_request) {
1245
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1246
        }
1247
        SDL_UnlockMutex(is->pictq_mutex);
1248

    
1249
        if (is->videoq.abort_request)
1250
            return -1;
1251
    }
1252

    
1253
    /* if the frame is not skipped, then display it */
1254
    if (vp->bmp) {
1255
        /* get a pointer on the bitmap */
1256
        SDL_LockYUVOverlay (vp->bmp);
1257

    
1258
        dst_pix_fmt = PIX_FMT_YUV420P;
1259
        pict.data[0] = vp->bmp->pixels[0];
1260
        pict.data[1] = vp->bmp->pixels[2];
1261
        pict.data[2] = vp->bmp->pixels[1];
1262

    
1263
        pict.linesize[0] = vp->bmp->pitches[0];
1264
        pict.linesize[1] = vp->bmp->pitches[2];
1265
        pict.linesize[2] = vp->bmp->pitches[1];
1266
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1267
            is->video_st->codec->width, is->video_st->codec->height,
1268
            is->video_st->codec->pix_fmt,
1269
            is->video_st->codec->width, is->video_st->codec->height,
1270
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1271
        if (img_convert_ctx == NULL) {
1272
            fprintf(stderr, "Cannot initialize the conversion context\n");
1273
            exit(1);
1274
        }
1275
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1276
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1277
        /* update the bitmap content */
1278
        SDL_UnlockYUVOverlay(vp->bmp);
1279

    
1280
        vp->pts = pts;
1281

    
1282
        /* now we can update the picture count */
1283
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1284
            is->pictq_windex = 0;
1285
        SDL_LockMutex(is->pictq_mutex);
1286
        is->pictq_size++;
1287
        SDL_UnlockMutex(is->pictq_mutex);
1288
    }
1289
    return 0;
1290
}
1291

    
1292
/**
1293
 * compute the exact PTS for the picture if it is omitted in the stream
1294
 * @param pts1 the dts of the pkt / pts of the frame
1295
 */
1296
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1297
{
1298
    double frame_delay, pts;
1299

    
1300
    pts = pts1;
1301

    
1302
    if (pts != 0) {
1303
        /* update video clock with pts, if present */
1304
        is->video_clock = pts;
1305
    } else {
1306
        pts = is->video_clock;
1307
    }
1308
    /* update video clock for next frame */
1309
    frame_delay = av_q2d(is->video_st->codec->time_base);
1310
    /* for MPEG2, the frame can be repeated, so we update the
1311
       clock accordingly */
1312
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313
    is->video_clock += frame_delay;
1314

    
1315
#if defined(DEBUG_SYNC) && 0
1316
    {
1317
        int ftype;
1318
        if (src_frame->pict_type == FF_B_TYPE)
1319
            ftype = 'B';
1320
        else if (src_frame->pict_type == FF_I_TYPE)
1321
            ftype = 'I';
1322
        else
1323
            ftype = 'P';
1324
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325
               ftype, pts, pts1);
1326
    }
1327
#endif
1328
    return queue_picture(is, src_frame, pts);
1329
}
1330

    
1331
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1332

    
1333
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1334
    int ret= avcodec_default_get_buffer(c, pic);
1335
    uint64_t *pts= av_malloc(sizeof(uint64_t));
1336
    *pts= global_video_pkt_pts;
1337
    pic->opaque= pts;
1338
    return ret;
1339
}
1340

    
1341
static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1342
    if(pic) av_freep(&pic->opaque);
1343
    avcodec_default_release_buffer(c, pic);
1344
}
1345

    
1346
static int video_thread(void *arg)
1347
{
1348
    VideoState *is = arg;
1349
    AVPacket pkt1, *pkt = &pkt1;
1350
    int len1, got_picture;
1351
    AVFrame *frame= avcodec_alloc_frame();
1352
    double pts;
1353

    
1354
    for(;;) {
1355
        while (is->paused && !is->videoq.abort_request) {
1356
            SDL_Delay(10);
1357
        }
1358
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1359
            break;
1360

    
1361
        if(pkt->data == flush_pkt.data){
1362
            avcodec_flush_buffers(is->video_st->codec);
1363
            continue;
1364
        }
1365

    
1366
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1367
           this packet, if any */
1368
        global_video_pkt_pts= pkt->pts;
1369
        len1 = avcodec_decode_video(is->video_st->codec,
1370
                                    frame, &got_picture,
1371
                                    pkt->data, pkt->size);
1372

    
1373
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1374
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1375
            pts= *(uint64_t*)frame->opaque;
1376
        else if(pkt->dts != AV_NOPTS_VALUE)
1377
            pts= pkt->dts;
1378
        else
1379
            pts= 0;
1380
        pts *= av_q2d(is->video_st->time_base);
1381

    
1382
//            if (len1 < 0)
1383
//                break;
1384
        if (got_picture) {
1385
            if (output_picture2(is, frame, pts) < 0)
1386
                goto the_end;
1387
        }
1388
        av_free_packet(pkt);
1389
        if (step)
1390
            if (cur_stream)
1391
                stream_pause(cur_stream);
1392
    }
1393
 the_end:
1394
    av_free(frame);
1395
    return 0;
1396
}
1397

    
1398
static int subtitle_thread(void *arg)
1399
{
1400
    VideoState *is = arg;
1401
    SubPicture *sp;
1402
    AVPacket pkt1, *pkt = &pkt1;
1403
    int len1, got_subtitle;
1404
    double pts;
1405
    int i, j;
1406
    int r, g, b, y, u, v, a;
1407

    
1408
    for(;;) {
1409
        while (is->paused && !is->subtitleq.abort_request) {
1410
            SDL_Delay(10);
1411
        }
1412
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1413
            break;
1414

    
1415
        if(pkt->data == flush_pkt.data){
1416
            avcodec_flush_buffers(is->subtitle_st->codec);
1417
            continue;
1418
        }
1419
        SDL_LockMutex(is->subpq_mutex);
1420
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1421
               !is->subtitleq.abort_request) {
1422
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1423
        }
1424
        SDL_UnlockMutex(is->subpq_mutex);
1425

    
1426
        if (is->subtitleq.abort_request)
1427
            goto the_end;
1428

    
1429
        sp = &is->subpq[is->subpq_windex];
1430

    
1431
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1432
           this packet, if any */
1433
        pts = 0;
1434
        if (pkt->pts != AV_NOPTS_VALUE)
1435
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1436

    
1437
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1438
                                    &sp->sub, &got_subtitle,
1439
                                    pkt->data, pkt->size);
1440
//            if (len1 < 0)
1441
//                break;
1442
        if (got_subtitle && sp->sub.format == 0) {
1443
            sp->pts = pts;
1444

    
1445
            for (i = 0; i < sp->sub.num_rects; i++)
1446
            {
1447
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1448
                {
1449
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1450
                    y = RGB_TO_Y_CCIR(r, g, b);
1451
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1452
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1453
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1454
                }
1455
            }
1456

    
1457
            /* now we can update the picture count */
1458
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1459
                is->subpq_windex = 0;
1460
            SDL_LockMutex(is->subpq_mutex);
1461
            is->subpq_size++;
1462
            SDL_UnlockMutex(is->subpq_mutex);
1463
        }
1464
        av_free_packet(pkt);
1465
//        if (step)
1466
//            if (cur_stream)
1467
//                stream_pause(cur_stream);
1468
    }
1469
 the_end:
1470
    return 0;
1471
}
1472

    
1473
/* copy samples for viewing in editor window */
1474
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1475
{
1476
    int size, len, channels;
1477

    
1478
    channels = is->audio_st->codec->channels;
1479

    
1480
    size = samples_size / sizeof(short);
1481
    while (size > 0) {
1482
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1483
        if (len > size)
1484
            len = size;
1485
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1486
        samples += len;
1487
        is->sample_array_index += len;
1488
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1489
            is->sample_array_index = 0;
1490
        size -= len;
1491
    }
1492
}
1493

    
1494
/* return the new audio buffer size (samples can be added or deleted
1495
   to get better sync if video or external master clock) */
1496
static int synchronize_audio(VideoState *is, short *samples,
1497
                             int samples_size1, double pts)
1498
{
1499
    int n, samples_size;
1500
    double ref_clock;
1501

    
1502
    n = 2 * is->audio_st->codec->channels;
1503
    samples_size = samples_size1;
1504

    
1505
    /* if not master, then we try to remove or add samples to correct the clock */
1506
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1507
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1508
        double diff, avg_diff;
1509
        int wanted_size, min_size, max_size, nb_samples;
1510

    
1511
        ref_clock = get_master_clock(is);
1512
        diff = get_audio_clock(is) - ref_clock;
1513

    
1514
        if (diff < AV_NOSYNC_THRESHOLD) {
1515
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1516
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1517
                /* not enough measures to have a correct estimate */
1518
                is->audio_diff_avg_count++;
1519
            } else {
1520
                /* estimate the A-V difference */
1521
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1522

    
1523
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1524
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1525
                    nb_samples = samples_size / n;
1526

    
1527
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1528
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1529
                    if (wanted_size < min_size)
1530
                        wanted_size = min_size;
1531
                    else if (wanted_size > max_size)
1532
                        wanted_size = max_size;
1533

    
1534
                    /* add or remove samples to correction the synchro */
1535
                    if (wanted_size < samples_size) {
1536
                        /* remove samples */
1537
                        samples_size = wanted_size;
1538
                    } else if (wanted_size > samples_size) {
1539
                        uint8_t *samples_end, *q;
1540
                        int nb;
1541

    
1542
                        /* add samples */
1543
                        nb = (samples_size - wanted_size);
1544
                        samples_end = (uint8_t *)samples + samples_size - n;
1545
                        q = samples_end + n;
1546
                        while (nb > 0) {
1547
                            memcpy(q, samples_end, n);
1548
                            q += n;
1549
                            nb -= n;
1550
                        }
1551
                        samples_size = wanted_size;
1552
                    }
1553
                }
1554
#if 0
1555
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1556
                       diff, avg_diff, samples_size - samples_size1,
1557
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1558
#endif
1559
            }
1560
        } else {
1561
            /* too big difference : may be initial PTS errors, so
1562
               reset A-V filter */
1563
            is->audio_diff_avg_count = 0;
1564
            is->audio_diff_cum = 0;
1565
        }
1566
    }
1567

    
1568
    return samples_size;
1569
}
1570

    
1571
/* decode one audio frame and returns its uncompressed size */
1572
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1573
{
1574
    AVPacket *pkt = &is->audio_pkt;
1575
    int n, len1, data_size;
1576
    double pts;
1577

    
1578
    for(;;) {
1579
        /* NOTE: the audio packet can contain several frames */
1580
        while (is->audio_pkt_size > 0) {
1581
            data_size = buf_size;
1582
            len1 = avcodec_decode_audio2(is->audio_st->codec,
1583
                                        (int16_t *)audio_buf, &data_size,
1584
                                        is->audio_pkt_data, is->audio_pkt_size);
1585
            if (len1 < 0) {
1586
                /* if error, we skip the frame */
1587
                is->audio_pkt_size = 0;
1588
                break;
1589
            }
1590

    
1591
            is->audio_pkt_data += len1;
1592
            is->audio_pkt_size -= len1;
1593
            if (data_size <= 0)
1594
                continue;
1595
            /* if no pts, then compute it */
1596
            pts = is->audio_clock;
1597
            *pts_ptr = pts;
1598
            n = 2 * is->audio_st->codec->channels;
1599
            is->audio_clock += (double)data_size /
1600
                (double)(n * is->audio_st->codec->sample_rate);
1601
#if defined(DEBUG_SYNC)
1602
            {
1603
                static double last_clock;
1604
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1605
                       is->audio_clock - last_clock,
1606
                       is->audio_clock, pts);
1607
                last_clock = is->audio_clock;
1608
            }
1609
#endif
1610
            return data_size;
1611
        }
1612

    
1613
        /* free the current packet */
1614
        if (pkt->data)
1615
            av_free_packet(pkt);
1616

    
1617
        if (is->paused || is->audioq.abort_request) {
1618
            return -1;
1619
        }
1620

    
1621
        /* read next packet */
1622
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1623
            return -1;
1624
        if(pkt->data == flush_pkt.data){
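            /* a flush packet, queued after a seek, means the decoder state
               must be reset before decoding continues */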
1625
            avcodec_flush_buffers(is->audio_st->codec);
1626
            continue;
1627
        }
1628

    
1629
        is->audio_pkt_data = pkt->data;
1630
        is->audio_pkt_size = pkt->size;
1631

    
1632
        /* update the audio clock with the packet pts, if present */
1633
        if (pkt->pts != AV_NOPTS_VALUE) {
1634
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1635
        }
1636
    }
1637
}
1638

    
1639
/* get the current audio output buffer size, in bytes. With SDL, we
1640
   cannot have precise information */
1641
static int audio_write_get_buf_size(VideoState *is)
1642
{
1643
    return is->audio_buf_size - is->audio_buf_index;
1644
}
1645

    
1646

    
1647
/* prepare a new audio buffer */
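/* called by SDL from its audio thread whenever the device needs 'len' more
   bytes; the buffer is refilled from audio_decode_frame() and the A/V
   correction from synchronize_audio() is applied along the way */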
1648
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1649
{
1650
    VideoState *is = opaque;
1651
    int audio_size, len1;
1652
    double pts;
1653

    
1654
    audio_callback_time = av_gettime();
1655

    
1656
    while (len > 0) {
1657
        if (is->audio_buf_index >= is->audio_buf_size) {
1658
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1659
           if (audio_size < 0) {
1660
                /* if error, just output silence */
1661
               is->audio_buf_size = 1024;
1662
               memset(is->audio_buf, 0, is->audio_buf_size);
1663
           } else {
1664
               if (is->show_audio)
1665
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1666
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1667
                                              pts);
1668
               is->audio_buf_size = audio_size;
1669
           }
1670
           is->audio_buf_index = 0;
1671
        }
1672
        len1 = is->audio_buf_size - is->audio_buf_index;
1673
        if (len1 > len)
1674
            len1 = len;
1675
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1676
        len -= len1;
1677
        stream += len1;
1678
        is->audio_buf_index += len1;
1679
    }
1680
}
1681

    
1682
/* open a given stream. Return 0 if OK */
1683
static int stream_component_open(VideoState *is, int stream_index)
1684
{
1685
    AVFormatContext *ic = is->ic;
1686
    AVCodecContext *enc;
1687
    AVCodec *codec;
1688
    SDL_AudioSpec wanted_spec, spec;
1689

    
1690
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1691
        return -1;
1692
    enc = ic->streams[stream_index]->codec;
1693

    
1694
    /* prepare audio output */
1695
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1696
        wanted_spec.freq = enc->sample_rate;
1697
        wanted_spec.format = AUDIO_S16SYS;
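        /* the SDL output path here is at most stereo, so ask the decoder to
           downmix when the source has more channels */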
1698
        if(enc->channels > 2) {
1699
            wanted_spec.channels = 2;
1700
            enc->request_channels = 2;
1701
        } else {
1702
            wanted_spec.channels = enc->channels;
1703
        }
1704
        wanted_spec.silence = 0;
1705
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1706
        wanted_spec.callback = sdl_audio_callback;
1707
        wanted_spec.userdata = is;
1708
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1709
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1710
            return -1;
1711
        }
1712
        is->audio_hw_buf_size = spec.size;
1713
    }
1714

    
1715
    codec = avcodec_find_decoder(enc->codec_id);
1716
    enc->debug_mv = debug_mv;
1717
    enc->debug = debug;
1718
    enc->workaround_bugs = workaround_bugs;
1719
    enc->lowres = lowres;
1720
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1721
    enc->idct_algo= idct;
1722
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1723
    enc->skip_frame= skip_frame;
1724
    enc->skip_idct= skip_idct;
1725
    enc->skip_loop_filter= skip_loop_filter;
1726
    enc->error_resilience= error_resilience;
1727
    enc->error_concealment= error_concealment;
1728
    if (!codec ||
1729
        avcodec_open(enc, codec) < 0)
1730
        return -1;
1731
    if(thread_count>1)
1732
        avcodec_thread_init(enc, thread_count);
1733
    enc->thread_count= thread_count;
1734
    switch(enc->codec_type) {
1735
    case CODEC_TYPE_AUDIO:
1736
        is->audio_stream = stream_index;
1737
        is->audio_st = ic->streams[stream_index];
1738
        is->audio_buf_size = 0;
1739
        is->audio_buf_index = 0;
1740

    
1741
        /* init averaging filter */
1742
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
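        /* chosen so that a difference AUDIO_DIFF_AVG_NB frames old weighs only
           about 1% of the most recent one (coef^NB == 0.01) */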
1743
        is->audio_diff_avg_count = 0;
1744
        /* since we do not have a precise enough audio fifo fullness,
1745
           we correct audio sync only if larger than this threshold */
1746
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1747

    
1748
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1749
        packet_queue_init(&is->audioq);
1750
        SDL_PauseAudio(0);
1751
        break;
1752
    case CODEC_TYPE_VIDEO:
1753
        is->video_stream = stream_index;
1754
        is->video_st = ic->streams[stream_index];
1755

    
1756
        is->frame_last_delay = 40e-3;
1757
        is->frame_timer = (double)av_gettime() / 1000000.0;
1758
        is->video_current_pts_time = av_gettime();
1759

    
1760
        packet_queue_init(&is->videoq);
1761
        is->video_tid = SDL_CreateThread(video_thread, is);
1762

    
1763
        enc->get_buffer= my_get_buffer;
1764
        enc->release_buffer= my_release_buffer;
1765
        break;
1766
    case CODEC_TYPE_SUBTITLE:
1767
        is->subtitle_stream = stream_index;
1768
        is->subtitle_st = ic->streams[stream_index];
1769
        packet_queue_init(&is->subtitleq);
1770

    
1771
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1772
        break;
1773
    default:
1774
        break;
1775
    }
1776
    return 0;
1777
}
1778

    
1779
static void stream_component_close(VideoState *is, int stream_index)
1780
{
1781
    AVFormatContext *ic = is->ic;
1782
    AVCodecContext *enc;
1783

    
1784
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1785
        return;
1786
    enc = ic->streams[stream_index]->codec;
1787

    
1788
    switch(enc->codec_type) {
1789
    case CODEC_TYPE_AUDIO:
1790
        packet_queue_abort(&is->audioq);
1791

    
1792
        SDL_CloseAudio();
1793

    
1794
        packet_queue_end(&is->audioq);
1795
        break;
1796
    case CODEC_TYPE_VIDEO:
1797
        packet_queue_abort(&is->videoq);
1798

    
1799
        /* note: we also signal this mutex to make sure we deblock the
1800
           video thread in all cases */
1801
        SDL_LockMutex(is->pictq_mutex);
1802
        SDL_CondSignal(is->pictq_cond);
1803
        SDL_UnlockMutex(is->pictq_mutex);
1804

    
1805
        SDL_WaitThread(is->video_tid, NULL);
1806

    
1807
        packet_queue_end(&is->videoq);
1808
        break;
1809
    case CODEC_TYPE_SUBTITLE:
1810
        packet_queue_abort(&is->subtitleq);
1811

    
1812
        /* note: we also signal this mutex to make sure we deblock the
1813
           subtitle thread in all cases */
1814
        SDL_LockMutex(is->subpq_mutex);
1815
        is->subtitle_stream_changed = 1;
1816

    
1817
        SDL_CondSignal(is->subpq_cond);
1818
        SDL_UnlockMutex(is->subpq_mutex);
1819

    
1820
        SDL_WaitThread(is->subtitle_tid, NULL);
1821

    
1822
        packet_queue_end(&is->subtitleq);
1823
        break;
1824
    default:
1825
        break;
1826
    }
1827

    
1828
    avcodec_close(enc);
1829
    switch(enc->codec_type) {
1830
    case CODEC_TYPE_AUDIO:
1831
        is->audio_st = NULL;
1832
        is->audio_stream = -1;
1833
        break;
1834
    case CODEC_TYPE_VIDEO:
1835
        is->video_st = NULL;
1836
        is->video_stream = -1;
1837
        break;
1838
    case CODEC_TYPE_SUBTITLE:
1839
        is->subtitle_st = NULL;
1840
        is->subtitle_stream = -1;
1841
        break;
1842
    default:
1843
        break;
1844
    }
1845
}
1846

    
1847
static void dump_stream_info(const AVFormatContext *s)
1848
{
1849
    if (s->track != 0)
1850
        fprintf(stderr, "Track: %d\n", s->track);
1851
    if (s->title[0] != '\0')
1852
        fprintf(stderr, "Title: %s\n", s->title);
1853
    if (s->author[0] != '\0')
1854
        fprintf(stderr, "Author: %s\n", s->author);
1855
    if (s->copyright[0] != '\0')
1856
        fprintf(stderr, "Copyright: %s\n", s->copyright);
1857
    if (s->comment[0] != '\0')
1858
        fprintf(stderr, "Comment: %s\n", s->comment);
1859
    if (s->album[0] != '\0')
1860
        fprintf(stderr, "Album: %s\n", s->album);
1861
    if (s->year != 0)
1862
        fprintf(stderr, "Year: %d\n", s->year);
1863
    if (s->genre[0] != '\0')
1864
        fprintf(stderr, "Genre: %s\n", s->genre);
1865
}
1866

    
1867
/* since we have only one decoding thread, we can use a global
1868
   variable instead of a thread local variable */
1869
static VideoState *global_video_state;
1870

    
1871
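/* returning non-zero from this callback makes blocking I/O inside libavformat
   abort, so the decode thread can shut down promptly on quit */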
static int decode_interrupt_cb(void)
1872
{
1873
    return (global_video_state && global_video_state->abort_request);
1874
}
1875

    
1876
/* this thread gets the stream from the disk or the network */
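/* it only demuxes: packets are pushed onto the audio/video/subtitle queues
   and are decoded elsewhere (video/subtitle threads, SDL audio callback) */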
1877
static int decode_thread(void *arg)
1878
{
1879
    VideoState *is = arg;
1880
    AVFormatContext *ic;
1881
    int err, i, ret, video_index, audio_index;
1882
    AVPacket pkt1, *pkt = &pkt1;
1883
    AVFormatParameters params, *ap = &params;
1884

    
1885
    video_index = -1;
1886
    audio_index = -1;
1887
    is->video_stream = -1;
1888
    is->audio_stream = -1;
1889
    is->subtitle_stream = -1;
1890

    
1891
    global_video_state = is;
1892
    url_set_interrupt_cb(decode_interrupt_cb);
1893

    
1894
    memset(ap, 0, sizeof(*ap));
1895

    
1896
    ap->width = frame_width;
1897
    ap->height= frame_height;
1898
    ap->time_base= (AVRational){1, 25};
1899
    ap->pix_fmt = frame_pix_fmt;
1900

    
1901
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1902
    if (err < 0) {
1903
        print_error(is->filename, err);
1904
        ret = -1;
1905
        goto fail;
1906
    }
1907
    is->ic = ic;
1908

    
1909
    if(genpts)
1910
        ic->flags |= AVFMT_FLAG_GENPTS;
1911

    
1912
    err = av_find_stream_info(ic);
1913
    if (err < 0) {
1914
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1915
        ret = -1;
1916
        goto fail;
1917
    }
1918
    if(ic->pb)
1919
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1920

    
1921
    /* if seeking requested, we execute it */
1922
    if (start_time != AV_NOPTS_VALUE) {
1923
        int64_t timestamp;
1924

    
1925
        timestamp = start_time;
1926
        /* add the stream start time */
1927
        if (ic->start_time != AV_NOPTS_VALUE)
1928
            timestamp += ic->start_time;
1929
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1930
        if (ret < 0) {
1931
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
1932
                    is->filename, (double)timestamp / AV_TIME_BASE);
1933
        }
1934
    }
1935

    
1936
    for(i = 0; i < ic->nb_streams; i++) {
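        /* pick the audio and video streams to use; the -ast/-vst counters
           skip matching streams until the requested one is reached */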
1937
        AVCodecContext *enc = ic->streams[i]->codec;
1938
        switch(enc->codec_type) {
1939
        case CODEC_TYPE_AUDIO:
1940
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1941
                audio_index = i;
1942
            break;
1943
        case CODEC_TYPE_VIDEO:
1944
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1945
                video_index = i;
1946
            break;
1947
        default:
1948
            break;
1949
        }
1950
    }
1951
    if (show_status) {
1952
        dump_format(ic, 0, is->filename, 0);
1953
        dump_stream_info(ic);
1954
    }
1955

    
1956
    /* open the streams */
1957
    if (audio_index >= 0) {
1958
        stream_component_open(is, audio_index);
1959
    }
1960

    
1961
    if (video_index >= 0) {
1962
        stream_component_open(is, video_index);
1963
    } else {
1964
        if (!display_disable)
1965
            is->show_audio = 1;
1966
    }
1967

    
1968
    if (is->video_stream < 0 && is->audio_stream < 0) {
1969
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
1970
        ret = -1;
1971
        goto fail;
1972
    }
1973

    
1974
    for(;;) {
1975
        if (is->abort_request)
1976
            break;
1977
        if (is->paused != is->last_paused) {
1978
            is->last_paused = is->paused;
1979
            if (is->paused)
1980
                av_read_pause(ic);
1981
            else
1982
                av_read_play(ic);
1983
        }
1984
#if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
1985
        if (is->paused &&
1986
                (!strcmp(ic->iformat->name, "rtsp") ||
1987
                 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
1988
            /* wait 10 ms to avoid trying to get another packet */
1989
            /* XXX: horrible */
1990
            SDL_Delay(10);
1991
            continue;
1992
        }
1993
#endif
1994
        if (is->seek_req) {
1995
            int stream_index= -1;
1996
            int64_t seek_target= is->seek_pos;
1997

    
1998
            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
1999
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
2000
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
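            /* seek_pos is expressed in AV_TIME_BASE units; rescale it to the
               chosen stream's time_base before calling av_seek_frame() */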
2001

    
2002
            if(stream_index>=0){
2003
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2004
            }
2005

    
2006
            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2007
            if (ret < 0) {
2008
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2009
            }else{
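                /* the seek succeeded: drop everything still queued and push a
                   flush packet so each decoder resets its internal state */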
2010
                if (is->audio_stream >= 0) {
2011
                    packet_queue_flush(&is->audioq);
2012
                    packet_queue_put(&is->audioq, &flush_pkt);
2013
                }
2014
                if (is->subtitle_stream >= 0) {
2015
                    packet_queue_flush(&is->subtitleq);
2016
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2017
                }
2018
                if (is->video_stream >= 0) {
2019
                    packet_queue_flush(&is->videoq);
2020
                    packet_queue_put(&is->videoq, &flush_pkt);
2021
                }
2022
            }
2023
            is->seek_req = 0;
2024
        }
2025

    
2026
        /* if the queues are full, no need to read more */
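        /* this also acts as back-pressure: demuxing pauses until the decoders
           have drained part of their queues */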
2027
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2028
            is->videoq.size > MAX_VIDEOQ_SIZE ||
2029
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2030
            url_feof(ic->pb)) {
2031
            /* wait 10 ms */
2032
            SDL_Delay(10);
2033
            continue;
2034
        }
2035
        ret = av_read_frame(ic, pkt);
2036
        if (ret < 0) {
2037
            if (url_ferror(ic->pb) == 0) {
2038
                SDL_Delay(100); /* wait for user event */
2039
                continue;
2040
            } else
2041
                break;
2042
        }
2043
        if (pkt->stream_index == is->audio_stream) {
2044
            packet_queue_put(&is->audioq, pkt);
2045
        } else if (pkt->stream_index == is->video_stream) {
2046
            packet_queue_put(&is->videoq, pkt);
2047
        } else if (pkt->stream_index == is->subtitle_stream) {
2048
            packet_queue_put(&is->subtitleq, pkt);
2049
        } else {
2050
            av_free_packet(pkt);
2051
        }
2052
    }
2053
    /* wait until the end */
2054
    while (!is->abort_request) {
2055
        SDL_Delay(100);
2056
    }
2057

    
2058
    ret = 0;
2059
 fail:
2060
    /* disable interrupting */
2061
    global_video_state = NULL;
2062

    
2063
    /* close each stream */
2064
    if (is->audio_stream >= 0)
2065
        stream_component_close(is, is->audio_stream);
2066
    if (is->video_stream >= 0)
2067
        stream_component_close(is, is->video_stream);
2068
    if (is->subtitle_stream >= 0)
2069
        stream_component_close(is, is->subtitle_stream);
2070
    if (is->ic) {
2071
        av_close_input_file(is->ic);
2072
        is->ic = NULL; /* safety */
2073
    }
2074
    url_set_interrupt_cb(NULL);
2075

    
2076
    if (ret != 0) {
2077
        SDL_Event event;
2078

    
2079
        event.type = FF_QUIT_EVENT;
2080
        event.user.data1 = is;
2081
        SDL_PushEvent(&event);
2082
    }
2083
    return 0;
2084
}
2085

    
2086
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2087
{
2088
    VideoState *is;
2089

    
2090
    is = av_mallocz(sizeof(VideoState));
2091
    if (!is)
2092
        return NULL;
2093
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2094
    is->iformat = iformat;
2095
    is->ytop = 0;
2096
    is->xleft = 0;
2097

    
2098
    /* start video display */
2099
    is->pictq_mutex = SDL_CreateMutex();
2100
    is->pictq_cond = SDL_CreateCond();
2101

    
2102
    is->subpq_mutex = SDL_CreateMutex();
2103
    is->subpq_cond = SDL_CreateCond();
2104

    
2105
    /* add the refresh timer to draw the picture */
2106
    schedule_refresh(is, 40);
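    /* 40 ms gives an initial ~25 fps cadence; the refresh handling reschedules
       further updates from the actual frame timing */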
2107

    
2108
    is->av_sync_type = av_sync_type;
2109
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2110
    if (!is->parse_tid) {
2111
        av_free(is);
2112
        return NULL;
2113
    }
2114
    return is;
2115
}
2116

    
2117
static void stream_close(VideoState *is)
2118
{
2119
    VideoPicture *vp;
2120
    int i;
2121
    /* XXX: use a special url_shutdown call to abort parse cleanly */
2122
    is->abort_request = 1;
2123
    SDL_WaitThread(is->parse_tid, NULL);
2124

    
2125
    /* free all pictures */
2126
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2127
        vp = &is->pictq[i];
2128
        if (vp->bmp) {
2129
            SDL_FreeYUVOverlay(vp->bmp);
2130
            vp->bmp = NULL;
2131
        }
2132
    }
2133
    SDL_DestroyMutex(is->pictq_mutex);
2134
    SDL_DestroyCond(is->pictq_cond);
2135
    SDL_DestroyMutex(is->subpq_mutex);
2136
    SDL_DestroyCond(is->subpq_cond);
2137
}
2138

    
2139
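/* switch to the next stream of the requested type, wrapping around; for
   subtitles, stepping past the last stream switches subtitles off */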
static void stream_cycle_channel(VideoState *is, int codec_type)
2140
{
2141
    AVFormatContext *ic = is->ic;
2142
    int start_index, stream_index;
2143
    AVStream *st;
2144

    
2145
    if (codec_type == CODEC_TYPE_VIDEO)
2146
        start_index = is->video_stream;
2147
    else if (codec_type == CODEC_TYPE_AUDIO)
2148
        start_index = is->audio_stream;
2149
    else
2150
        start_index = is->subtitle_stream;
2151
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2152
        return;
2153
    stream_index = start_index;
2154
    for(;;) {
2155
        if (++stream_index >= is->ic->nb_streams)
2156
        {
2157
            if (codec_type == CODEC_TYPE_SUBTITLE)
2158
            {
2159
                stream_index = -1;
2160
                goto the_end;
2161
            } else
2162
                stream_index = 0;
2163
        }
2164
        if (stream_index == start_index)
2165
            return;
2166
        st = ic->streams[stream_index];
2167
        if (st->codec->codec_type == codec_type) {
2168
            /* check that parameters are OK */
2169
            switch(codec_type) {
2170
            case CODEC_TYPE_AUDIO:
2171
                if (st->codec->sample_rate != 0 &&
2172
                    st->codec->channels != 0)
2173
                    goto the_end;
2174
                break;
2175
            case CODEC_TYPE_VIDEO:
2176
            case CODEC_TYPE_SUBTITLE:
2177
                goto the_end;
2178
            default:
2179
                break;
2180
            }
2181
        }
2182
    }
2183
 the_end:
2184
    stream_component_close(is, start_index);
2185
    stream_component_open(is, stream_index);
2186
}
2187

    
2188

    
2189
static void toggle_full_screen(void)
2190
{
2191
    is_full_screen = !is_full_screen;
2192
    if (!fs_screen_width) {
2193
        /* use default SDL method */
2194
//        SDL_WM_ToggleFullScreen(screen);
2195
    }
2196
    video_open(cur_stream);
2197
}
2198

    
2199
static void toggle_pause(void)
2200
{
2201
    if (cur_stream)
2202
        stream_pause(cur_stream);
2203
    step = 0;
2204
}
2205

    
2206
static void step_to_next_frame(void)
2207
{
2208
    if (cur_stream) {
2209
        /* if the stream is paused, unpause it, then step */
2210
        if (cur_stream->paused)
2211
            stream_pause(cur_stream);
2212
    }
2213
    step = 1;
2214
}
2215

    
2216
static void do_exit(void)
2217
{
2218
    if (cur_stream) {
2219
        stream_close(cur_stream);
2220
        cur_stream = NULL;
2221
    }
2222
    if (show_status)
2223
        printf("\n");
2224
    SDL_Quit();
2225
    exit(0);
2226
}
2227

    
2228
static void toggle_audio_display(void)
2229
{
2230
    if (cur_stream) {
2231
        cur_stream->show_audio = !cur_stream->show_audio;
2232
    }
2233
}
2234

    
2235
/* handle an event sent by the GUI */
2236
static void event_loop(void)
2237
{
2238
    SDL_Event event;
2239
    double incr, pos, frac;
2240

    
2241
    for(;;) {
2242
        SDL_WaitEvent(&event);
2243
        switch(event.type) {
2244
        case SDL_KEYDOWN:
2245
            switch(event.key.keysym.sym) {
2246
            case SDLK_ESCAPE:
2247
            case SDLK_q:
2248
                do_exit();
2249
                break;
2250
            case SDLK_f:
2251
                toggle_full_screen();
2252
                break;
2253
            case SDLK_p:
2254
            case SDLK_SPACE:
2255
                toggle_pause();
2256
                break;
2257
            case SDLK_s: //S: Step to next frame
2258
                step_to_next_frame();
2259
                break;
2260
            case SDLK_a:
2261
                if (cur_stream)
2262
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2263
                break;
2264
            case SDLK_v:
2265
                if (cur_stream)
2266
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2267
                break;
2268
            case SDLK_t:
2269
                if (cur_stream)
2270
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2271
                break;
2272
            case SDLK_w:
2273
                toggle_audio_display();
2274
                break;
2275
            case SDLK_LEFT:
2276
                incr = -10.0;
2277
                goto do_seek;
2278
            case SDLK_RIGHT:
2279
                incr = 10.0;
2280
                goto do_seek;
2281
            case SDLK_UP:
2282
                incr = 60.0;
2283
                goto do_seek;
2284
            case SDLK_DOWN:
2285
                incr = -60.0;
2286
            do_seek:
2287
                if (cur_stream) {
2288
                    if (seek_by_bytes) {
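                        /* translate the time increment into a byte offset,
                           using the container bit rate when it is known */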
2289
                        pos = url_ftell(cur_stream->ic->pb);
2290
                        if (cur_stream->ic->bit_rate)
2291
                            incr *= cur_stream->ic->bit_rate / 60.0;
2292
                        else
2293
                            incr *= 180000.0;
2294
                        pos += incr;
2295
                        stream_seek(cur_stream, pos, incr);
2296
                    } else {
2297
                        pos = get_master_clock(cur_stream);
2298
                        pos += incr;
2299
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2300
                    }
2301
                }
2302
                break;
2303
            default:
2304
                break;
2305
            }
2306
            break;
2307
        case SDL_MOUSEBUTTONDOWN:
2308
            if (cur_stream) {
2309
                int ns, hh, mm, ss;
2310
                int tns, thh, tmm, tss;
2311
                tns = cur_stream->ic->duration/1000000LL;
2312
                thh = tns/3600;
2313
                tmm = (tns%3600)/60;
2314
                tss = (tns%60);
2315
                frac = (double)event.button.x/(double)cur_stream->width;
2316
                ns = frac*tns;
2317
                hh = ns/3600;
2318
                mm = (ns%3600)/60;
2319
                ss = (ns%60);
2320
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2321
                        hh, mm, ss, thh, tmm, tss);
2322
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2323
            }
2324
            break;
2325
        case SDL_VIDEORESIZE:
2326
            if (cur_stream) {
2327
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2328
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2329
                screen_width = cur_stream->width = event.resize.w;
2330
                screen_height= cur_stream->height= event.resize.h;
2331
            }
2332
            break;
2333
        case SDL_QUIT:
2334
        case FF_QUIT_EVENT:
2335
            do_exit();
2336
            break;
2337
        case FF_ALLOC_EVENT:
2338
            video_open(event.user.data1);
2339
            alloc_picture(event.user.data1);
2340
            break;
2341
        case FF_REFRESH_EVENT:
2342
            video_refresh_timer(event.user.data1);
2343
            break;
2344
        default:
2345
            break;
2346
        }
2347
    }
2348
}
2349

    
2350
static void opt_frame_size(const char *arg)
2351
{
2352
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2353
        fprintf(stderr, "Incorrect frame size\n");
2354
        exit(1);
2355
    }
2356
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2357
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2358
        exit(1);
2359
    }
2360
}
2361

    
2362
static void opt_width(const char *arg)
2363
{
2364
    screen_width = atoi(arg);
2365
    if(screen_width<=0){
2366
        fprintf(stderr, "invalid width\n");
2367
        exit(1);
2368
    }
2369
}
2370

    
2371
static void opt_height(const char *arg)
2372
{
2373
    screen_height = atoi(arg);
2374
    if(screen_height<=0){
2375
        fprintf(stderr, "invalid height\n");
2376
        exit(1);
2377
    }
2378
}
2379

    
2380
static void opt_format(const char *arg)
2381
{
2382
    file_iformat = av_find_input_format(arg);
2383
    if (!file_iformat) {
2384
        fprintf(stderr, "Unknown input format: %s\n", arg);
2385
        exit(1);
2386
    }
2387
}
2388

    
2389
static void opt_frame_pix_fmt(const char *arg)
2390
{
2391
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
2392
}
2393

    
2394
static void opt_sync(const char *arg)
2395
{
2396
    if (!strcmp(arg, "audio"))
2397
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2398
    else if (!strcmp(arg, "video"))
2399
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2400
    else if (!strcmp(arg, "ext"))
2401
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2402
    else {
2403
        show_help();
2404
        exit(1);
2405
    }
2406
}
2407

    
2408
static void opt_seek(const char *arg)
2409
{
2410
    start_time = parse_date(arg, 1);
2411
    if (start_time == INT64_MIN) {
2412
        fprintf(stderr, "Invalid duration specification: %s\n", arg);
2413
        exit(1);
2414
    }
2415
}
2416

    
2417
static void opt_debug(const char *arg)
2418
{
2419
    av_log_set_level(99);
2420
    debug = atoi(arg);
2421
}
2422

    
2423
static void opt_vismv(const char *arg)
2424
{
2425
    debug_mv = atoi(arg);
2426
}
2427

    
2428
static void opt_thread_count(const char *arg)
2429
{
2430
    thread_count= atoi(arg);
2431
#if !defined(HAVE_THREADS)
2432
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2433
#endif
2434
}
2435

    
2436
static void opt_show_help(void)
2437
{
2438
    show_help();
2439
    exit(0);
2440
}
2441

    
2442
const OptionDef options[] = {
2443
    { "h", 0, {(void*)opt_show_help}, "show help" },
2444
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2445
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2446
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2447
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2448
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2449
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2450
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2451
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2452
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2453
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2454
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2455
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2456
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2457
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2458
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2459
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2460
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2461
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2462
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2463
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2464
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2465
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2466
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2467
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2468
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2469
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
2470
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2471
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2472
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2473
    { NULL, },
2474
};
2475

    
2476
void show_help(void)
2477
{
2478
    printf("usage: ffplay [options] input_file\n"
2479
           "Simple media player\n");
2480
    printf("\n");
2481
    show_help_options(options, "Main options:\n",
2482
                      OPT_EXPERT, 0);
2483
    show_help_options(options, "\nAdvanced options:\n",
2484
                      OPT_EXPERT, OPT_EXPERT);
2485
    printf("\nWhile playing:\n"
2486
           "q, ESC              quit\n"
2487
           "f                   toggle full screen\n"
2488
           "p, SPC              pause\n"
2489
           "a                   cycle audio channel\n"
2490
           "v                   cycle video channel\n"
2491
           "t                   cycle subtitle channel\n"
2492
           "w                   show audio waves\n"
2493
           "left/right          seek backward/forward 10 seconds\n"
2494
           "down/up             seek backward/forward 1 minute\n"
2495
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2496
           );
2497
}
2498

    
2499
void opt_input_file(const char *filename)
2500
{
2501
    if (!strcmp(filename, "-"))
2502
        filename = "pipe:";
2503
    input_filename = filename;
2504
}
2505

    
2506
/* program entry point */
2507
int main(int argc, char **argv)
2508
{
2509
    int flags;
2510

    
2511
    /* register all codecs, demuxers and protocols */
2512
    avcodec_register_all();
2513
    avdevice_register_all();
2514
    av_register_all();
2515

    
2516
    show_banner(program_name, program_birth_year);
2517

    
2518
    parse_options(argc, argv, options, opt_input_file);
2519

    
2520
    if (!input_filename) {
2521
        show_help();
2522
        exit(1);
2523
    }
2524

    
2525
    if (display_disable) {
2526
        video_disable = 1;
2527
    }
2528
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2529
#if !defined(__MINGW32__) && !defined(__APPLE__)
2530
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2531
#endif
2532
    if (SDL_Init (flags)) {
2533
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2534
        exit(1);
2535
    }
2536

    
2537
    if (!display_disable) {
2538
#ifdef HAVE_SDL_VIDEO_SIZE
2539
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2540
        fs_screen_width = vi->current_w;
2541
        fs_screen_height = vi->current_h;
2542
#endif
2543
    }
2544

    
2545
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2546
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2547
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2548
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2549

    
2550
    av_init_packet(&flush_pkt);
2551
    flush_pkt.data= "FLUSH";
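    /* the flush packet is recognized by pointer identity (pkt->data ==
       flush_pkt.data) in the decoding loops, not by its contents */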
2552

    
2553
    cur_stream = stream_open(input_filename, file_iformat);
2554

    
2555
    event_loop();
2556

    
2557
    /* never returns */
2558

    
2559
    return 0;
2560
}