ffmpeg / ffplay.c @ 755bfeab

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "swscale.h"

#include "version.h"
#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#ifdef CONFIG_OS2
#define INCL_DOS
 #include <os2.h>
 #include <stdio.h>

 void MorphToPM()
 {
   PPIB pib;
   PTIB tib;

   DosGetInfoBlocks(&tib, &pib);

   // Change flag from VIO to PM:
   if (pib->pib_ultype==2) pib->pib_ultype = 3;
 }
#endif

#undef exit

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

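/* FIFO of demuxed packets, shared between the demuxer thread and the
   decoder threads; every access is serialized by the SDL mutex/cond pair. */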
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

224

    
225
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
226
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
227
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
228

    
229
SDL_Surface *screen;
230

    
231
/* packet queue handling */
232
static void packet_queue_init(PacketQueue *q)
233
{
234
    memset(q, 0, sizeof(PacketQueue));
235
    q->mutex = SDL_CreateMutex();
236
    q->cond = SDL_CreateCond();
237
}
238

    
239
static void packet_queue_flush(PacketQueue *q)
240
{
241
    AVPacketList *pkt, *pkt1;
242

    
243
    SDL_LockMutex(q->mutex);
244
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
245
        pkt1 = pkt->next;
246
        av_free_packet(&pkt->pkt);
247
        av_freep(&pkt);
248
    }
249
    q->last_pkt = NULL;
250
    q->first_pkt = NULL;
251
    q->nb_packets = 0;
252
    q->size = 0;
253
    SDL_UnlockMutex(q->mutex);
254
}
255

    
256
static void packet_queue_end(PacketQueue *q)
257
{
258
    packet_queue_flush(q);
259
    SDL_DestroyMutex(q->mutex);
260
    SDL_DestroyCond(q->cond);
261
}
262

    
263
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
264
{
265
    AVPacketList *pkt1;
266

    
267
    /* duplicate the packet */
268
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
269
        return -1;
270

    
271
    pkt1 = av_malloc(sizeof(AVPacketList));
272
    if (!pkt1)
273
        return -1;
274
    pkt1->pkt = *pkt;
275
    pkt1->next = NULL;
276

    
277

    
278
    SDL_LockMutex(q->mutex);
279

    
280
    if (!q->last_pkt)
281

    
282
        q->first_pkt = pkt1;
283
    else
284
        q->last_pkt->next = pkt1;
285
    q->last_pkt = pkt1;
286
    q->nb_packets++;
287
    q->size += pkt1->pkt.size;
288
    /* XXX: should duplicate packet data in DV case */
289
    SDL_CondSignal(q->cond);
290

    
291
    SDL_UnlockMutex(q->mutex);
292
    return 0;
293
}
294

    
295
static void packet_queue_abort(PacketQueue *q)
296
{
297
    SDL_LockMutex(q->mutex);
298

    
299
    q->abort_request = 1;
300

    
301
    SDL_CondSignal(q->cond);
302

    
303
    SDL_UnlockMutex(q->mutex);
304
}
305

    
306
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
307
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
308
{
309
    AVPacketList *pkt1;
310
    int ret;
311

    
312
    SDL_LockMutex(q->mutex);
313

    
314
    for(;;) {
315
        if (q->abort_request) {
316
            ret = -1;
317
            break;
318
        }
319

    
320
        pkt1 = q->first_pkt;
321
        if (pkt1) {
322
            q->first_pkt = pkt1->next;
323
            if (!q->first_pkt)
324
                q->last_pkt = NULL;
325
            q->nb_packets--;
326
            q->size -= pkt1->pkt.size;
327
            *pkt = pkt1->pkt;
328
            av_free(pkt1);
329
            ret = 1;
330
            break;
331
        } else if (!block) {
332
            ret = 0;
333
            break;
334
        } else {
335
            SDL_CondWait(q->cond, q->mutex);
336
        }
337
    }
338
    SDL_UnlockMutex(q->mutex);
339
    return ret;
340
}
341

    
342
static inline void fill_rectangle(SDL_Surface *screen,
343
                                  int x, int y, int w, int h, int color)
344
{
345
    SDL_Rect rect;
346
    rect.x = x;
347
    rect.y = y;
348
    rect.w = w;
349
    rect.h = h;
350
    SDL_FillRect(screen, &rect, color);
351
}
352

    
353
#if 0
354
/* draw only the border of a rectangle */
355
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
356
{
357
    int w1, w2, h1, h2;
358

359
    /* fill the background */
360
    w1 = x;
361
    if (w1 < 0)
362
        w1 = 0;
363
    w2 = s->width - (x + w);
364
    if (w2 < 0)
365
        w2 = 0;
366
    h1 = y;
367
    if (h1 < 0)
368
        h1 = 0;
369
    h2 = s->height - (y + h);
370
    if (h2 < 0)
371
        h2 = 0;
372
    fill_rectangle(screen,
373
                   s->xleft, s->ytop,
374
                   w1, s->height,
375
                   color);
376
    fill_rectangle(screen,
377
                   s->xleft + s->width - w2, s->ytop,
378
                   w2, s->height,
379
                   color);
380
    fill_rectangle(screen,
381
                   s->xleft + w1, s->ytop,
382
                   s->width - w1 - w2, h1,
383
                   color);
384
    fill_rectangle(screen,
385
                   s->xleft + w1, s->ytop + s->height - h2,
386
                   s->width - w1 - w2, h2,
387
                   color);
388
}
389
#endif
390

    
391

    
392

    
393
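/* fixed-point RGB -> CCIR-range YUV conversion helpers, used below to
   convert subtitle palettes before blending */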
#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

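/* alpha-blend one palettized subtitle rectangle into a YUV420P picture;
   chroma is subsampled 2x2, hence the separate handling of odd x/y borders */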
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;

    lum = dst->data[0] + rect->y * dst->linesize[0];
    cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
    cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];

    width2 = (rect->w + 1) >> 1;
    skip2 = rect->x >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->linesize;
    p = rect->bitmap;
    pal = rect->rgba_palette;  /* Now in YCrCb! */

    if (rect->y & 1) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_free(sp->sub.rects[i].bitmap);
        av_free(sp->sub.rects[i].rgba_palette);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

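                    /* SDL's YV12 overlay stores the V plane before the U
                       plane, so planes 1 and 2 are swapped relative to
                       PIX_FMT_YUV420P */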
                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i]);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

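/* mathematical modulo: always returns a value in [0, b) for positive b,
   even when a is negative */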
static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;

    /* compute display index: center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

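        /* heuristic: look for a nearby zero crossing so that the waveform
           display starts at a similar phase on each refresh */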
        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

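/* (re)create the SDL output surface, honouring fullscreen mode and any
   user-forced window size before falling back to the codec dimensions */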
static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef CONFIG_DARWIN
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors) */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
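        /* same U/V plane swap as in video_image_display(): SDL's YV12
           layout stores V before U */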
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        if (img_convert_ctx == NULL) {
            img_convert_ctx = sws_getContext(is->video_st->codec->width,
                    is->video_st->codec->height, is->video_st->codec->pix_fmt,
                    is->video_st->codec->width, is->video_st->codec->height,
                    dst_pix_fmt, sws_flags, NULL, NULL, NULL);
            if (img_convert_ctx == NULL) {
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;

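/* custom get/release_buffer callbacks: tag each frame buffer with the pts of
   the packet being decoded when it was allocated (via frame->opaque), so the
   video thread can recover a correctly reordered pts after decoding */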
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
    int ret= avcodec_default_get_buffer(c, pic);
    uint64_t *pts= av_malloc(sizeof(uint64_t));
    *pts= global_video_pkt_pts;
    pic->opaque= pts;
    return ret;
}

static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
    if(pic) av_freep(&pic->opaque);
    avcodec_default_release_buffer(c, pic);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        global_video_pkt_pts= pkt->pts;
        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
            pts= *(uint64_t*)frame->opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

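/* subtitle decoding thread: converts each decoded rectangle's RGBA palette
   to YUVA so blend_subrect() can composite it directly onto the picture */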
static int subtitle_thread(void *arg)
1402
{
1403
    VideoState *is = arg;
1404
    SubPicture *sp;
1405
    AVPacket pkt1, *pkt = &pkt1;
1406
    int len1, got_subtitle;
1407
    double pts;
1408
    int i, j;
1409
    int r, g, b, y, u, v, a;
1410

    
1411
    for(;;) {
1412
        while (is->paused && !is->subtitleq.abort_request) {
1413
            SDL_Delay(10);
1414
        }
1415
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1416
            break;
1417

    
1418
        if(pkt->data == flush_pkt.data){
1419
            avcodec_flush_buffers(is->subtitle_st->codec);
1420
            continue;
1421
        }
1422
        SDL_LockMutex(is->subpq_mutex);
1423
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1424
               !is->subtitleq.abort_request) {
1425
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1426
        }
1427
        SDL_UnlockMutex(is->subpq_mutex);
1428

    
1429
        if (is->subtitleq.abort_request)
1430
            goto the_end;
1431

    
1432
        sp = &is->subpq[is->subpq_windex];
1433

    
1434
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1435
           this packet, if any */
1436
        pts = 0;
1437
        if (pkt->pts != AV_NOPTS_VALUE)
1438
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1439

    
1440
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1441
                                    &sp->sub, &got_subtitle,
1442
                                    pkt->data, pkt->size);
1443
//            if (len1 < 0)
1444
//                break;
1445
        if (got_subtitle && sp->sub.format == 0) {
1446
            sp->pts = pts;
1447

    
1448
            for (i = 0; i < sp->sub.num_rects; i++)
1449
            {
1450
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1451
                {
1452
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1453
                    y = RGB_TO_Y_CCIR(r, g, b);
1454
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1455
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1456
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1457
                }
1458
            }
1459

    
1460
            /* now we can update the picture count */
1461
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1462
                is->subpq_windex = 0;
1463
            SDL_LockMutex(is->subpq_mutex);
1464
            is->subpq_size++;
1465
            SDL_UnlockMutex(is->subpq_mutex);
1466
        }
1467
        av_free_packet(pkt);
1468
//        if (step)
1469
//            if (cur_stream)
1470
//                stream_pause(cur_stream);
1471
    }
1472
 the_end:
1473
    return 0;
1474
}
1475

    
1476
/* copy samples for viewing in editor window */
1477
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1478
{
1479
    int size, len, channels;
1480

    
1481
    channels = is->audio_st->codec->channels;
1482

    
1483
    size = samples_size / sizeof(short);
1484
    while (size > 0) {
1485
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1486
        if (len > size)
1487
            len = size;
1488
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1489
        samples += len;
1490
        is->sample_array_index += len;
1491
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1492
            is->sample_array_index = 0;
1493
        size -= len;
1494
    }
1495
}
1496

    
1497
/* return the new audio buffer size (samples can be added or deleted
1498
   to get better sync if video or external master clock) */
1499
static int synchronize_audio(VideoState *is, short *samples,
1500
                             int samples_size1, double pts)
1501
{
1502
    int n, samples_size;
1503
    double ref_clock;
1504

    
1505
    n = 2 * is->audio_st->codec->channels;
1506
    samples_size = samples_size1;
1507

    
1508
    /* if not master, then we try to remove or add samples to correct the clock */
1509
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1510
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1511
        double diff, avg_diff;
1512
        int wanted_size, min_size, max_size, nb_samples;
1513

    
1514
        ref_clock = get_master_clock(is);
1515
        diff = get_audio_clock(is) - ref_clock;
1516

    
1517
        if (diff < AV_NOSYNC_THRESHOLD) {
1518
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1519
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1520
                /* not enough measures to have a correct estimate */
1521
                is->audio_diff_avg_count++;
1522
            } else {
1523
                /* estimate the A-V difference */
1524
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1525

    
1526
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1527
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1528
                    nb_samples = samples_size / n;
1529

    
1530
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1531
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1532
                    if (wanted_size < min_size)
1533
                        wanted_size = min_size;
1534
                    else if (wanted_size > max_size)
1535
                        wanted_size = max_size;
1536

    
1537
                    /* add or remove samples to correction the synchro */
1538
                    if (wanted_size < samples_size) {
1539
                        /* remove samples */
1540
                        samples_size = wanted_size;
1541
                    } else if (wanted_size > samples_size) {
1542
                        uint8_t *samples_end, *q;
1543
                        int nb;
1544

    
1545
                        /* add samples */
1546
                        nb = (samples_size - wanted_size);
1547
                        samples_end = (uint8_t *)samples + samples_size - n;
1548
                        q = samples_end + n;
1549
                        while (nb > 0) {
1550
                            memcpy(q, samples_end, n);
1551
                            q += n;
1552
                            nb -= n;
1553
                        }
1554
                        samples_size = wanted_size;
1555
                    }
1556
                }
1557
#if 0
1558
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1559
                       diff, avg_diff, samples_size - samples_size1,
1560
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1561
#endif
1562
            }
1563
        } else {
1564
            /* too big difference : may be initial PTS errors, so
1565
               reset A-V filter */
1566
            is->audio_diff_avg_count = 0;
1567
            is->audio_diff_cum = 0;
1568
        }
1569
    }
1570

    
1571
    return samples_size;
1572
}

/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if the packet had no pts, derive it from the running audio clock */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* update the audio clock with the packet pts, if any */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}
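
/* Example of the clock bookkeeping above: for 16-bit stereo at 44100 Hz,
   n == 4 bytes per sample frame, so a 4096-byte decoded chunk advances
   is->audio_clock by 4096 / (4 * 44100), i.e. about 23.2 ms. The clock
   therefore always points just past the last data handed back to the caller. */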

/* return how much decoded audio is still buffered for output, in bytes. With
   SDL we cannot get precise information about the hardware buffer fullness */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}
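
/* Note on the copy loop above: SDL requests 'len' bytes per callback
   (SDL_AUDIO_BUFFER_SIZE samples, i.e. 4096 bytes for 16-bit stereo). If the
   last decoded buffer holds, say, 4608 bytes, 4096 are copied now and the
   remaining 512 are carried over to the next callback via audio_buf_index,
   so no decoded data is dropped at the callback boundary. */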

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness estimate,
           we only correct audio sync when the error exceeds this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);

        enc->get_buffer = my_get_buffer;
        enc->release_buffer = my_release_buffer;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}
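
/* Worked example of the threshold above: with SDL_AUDIO_BUFFER_SIZE == 1024
   samples and a 44100 Hz stream, audio_diff_threshold is 2.0 * 1024 / 44100,
   i.e. about 46 ms (two SDL callback periods). Smaller A-V differences are
   only accumulated by the averaging filter instead of triggering sample
   correction. */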

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}
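
/* The teardown order above matters: first abort the packet queue so the
   worker thread stops blocking in packet_queue_get(), then wake any wait on
   the picture/subtitle condition, then join the thread, and only then free
   the queue. Reordering these steps can leave the worker thread blocked
   forever on a condition that is never signalled again. */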

static void dump_stream_info(const AVFormatContext *s)
{
    if (s->track != 0)
        fprintf(stderr, "Track: %d\n", s->track);
    if (s->title[0] != '\0')
        fprintf(stderr, "Title: %s\n", s->title);
    if (s->author[0] != '\0')
        fprintf(stderr, "Author: %s\n", s->author);
    if (s->copyright[0] != '\0')
        fprintf(stderr, "Copyright: %s\n", s->copyright);
    if (s->comment[0] != '\0')
        fprintf(stderr, "Comment: %s\n", s->comment);
    if (s->album[0] != '\0')
        fprintf(stderr, "Album: %s\n", s->album);
    if (s->year != 0)
        fprintf(stderr, "Year: %d\n", s->year);
    if (s->genre[0] != '\0')
        fprintf(stderr, "Genre: %s\n", s->genre);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, use_play;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
                              stream */

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
#ifdef CONFIG_RTSP_DEMUXER
    use_play = (ic->iformat == &rtsp_demuxer);
#else
    use_play = 0;
#endif

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    if (!use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
    }

    /* if seeking was requested, execute it now */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* now we can begin to play (RTSP stream only) */
    av_read_play(ic);

    if (use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (video_index < 0 && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#ifdef CONFIG_RTSP_DEMUXER
        if (is->paused && ic->iformat == &rtsp_demuxer) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }
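
        /* Example of the rescale above: a request to seek to 10 s arrives as
           10 * AV_TIME_BASE == 10000000; for a stream whose time_base is
           1/90000 (typical for MPEG), av_rescale_q() turns that into a
           seek_target of 900000 in stream time units. */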

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}
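
/* Note on the back-pressure above: av_read_frame() is throttled whenever any
   packet queue exceeds its MAX_*Q_SIZE byte budget, so the demuxer never runs
   more than a bounded amount ahead of the decoders; the 10 ms sleep simply
   polls until a decoder has drained some data. */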

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    pstrcpy(is->filename, sizeof(is->filename), filename);
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}
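
/* The initial schedule_refresh(is, 40) above arms the first display refresh
   40 ms out, i.e. roughly one frame period at 25 fps; subsequent refreshes
   are then rescheduled from the refresh handler based on the actual frame
   delays. */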

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}
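
/* Example of the cycling above: with streams [0] video, [1] audio (eng) and
   [2] audio (fre), pressing 'a' while stream 1 is active selects stream 2;
   pressing 'a' again wraps past the video stream and returns to stream 1.
   For subtitles the cycle additionally passes through "no subtitle" (-1). */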


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        if (cur_stream->paused)
            cur_stream->paused=0;
        cur_stream->video_current_pts = get_video_clock(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}
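
/* Example of the mouse seek above: clicking at x == 400 in an 800-pixel wide
   window while playing a 2 hour file gives frac == 0.5, so the target is
   start_time + 0.5 * duration, i.e. the 1:00:00 mark. */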

static void opt_frame_size(const char *arg)
{
    if (parse_image_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static void opt_width(const char *arg)
{
    screen_width = atoi(arg);
    if(screen_width<=0){
        fprintf(stderr, "invalid width\n");
        exit(1);
    }
}

static void opt_height(const char *arg)
{
    screen_height = atoi(arg);
    if(screen_height<=0){
        fprintf(stderr, "invalid height\n");
        exit(1);
    }
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

#ifdef CONFIG_RTSP_DEMUXER
static void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif

static void opt_sync(const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else
        show_help();
}

static void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}

static void opt_debug(const char *arg)
{
    av_log_level = 99;
    debug = atoi(arg);
}

static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}

static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}

const OptionDef options[] = {
    { "h", 0, {(void*)show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_RTSP_DEMUXER
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};

void show_help(void)
{
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2006 Fabrice Bellard, et al.\n"
           "usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
    exit(1);
}

void parse_arg_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demuxers and protocols */
    av_register_all();

    #ifdef CONFIG_OS2
      MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions

      // Make stdout and stderr unbuffered
      setbuf( stdout, NULL );
      setbuf( stderr, NULL );
    #endif

    parse_options(argc, argv, options);

    if (!input_filename)
        show_help();

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";
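
    /* flush_pkt is a sentinel: its data pointer (not its contents) is what the
       decoder loops compare against (see audio_decode_frame() for the audio
       side), so queueing it after a seek tells each decoder to call
       avcodec_flush_buffers() before handling the packets that follow. */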

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}