ffmpeg / ffplay.c @ dab57ec5
1
/*
2
 * FFplay : Simple Media Player based on the ffmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include "avformat.h"
23
#include "swscale.h"
24
#include "avstring.h"
25

    
26
#include "version.h"
27
#include "cmdutils.h"
28

    
29
#include <SDL.h>
30
#include <SDL_thread.h>
31

    
32
#ifdef __MINGW32__
33
#undef main /* We don't want SDL to override our main() */
34
#endif
35

    
36
#ifdef CONFIG_OS2
37
#define INCL_DOS
38
 #include <os2.h>
39
 #include <stdio.h>
40

    
41
 void MorphToPM()
42
 {
43
   PPIB pib;
44
   PTIB tib;
45

    
46
   DosGetInfoBlocks(&tib, &pib);
47

    
48
   // Change flag from VIO to PM:
49
   if (pib->pib_ultype==2) pib->pib_ultype = 3;
50
 }
51
#endif
52

    
53
#undef exit
54

    
55
//#define DEBUG_SYNC
56

    
57
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
58
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
59
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
60

    
61
/* SDL audio buffer size, in samples. Should be small to have precise
62
   A/V sync as SDL does not have hardware buffer fullness info. */
63
#define SDL_AUDIO_BUFFER_SIZE 1024
64

    
65
/* no AV sync correction is done if below the AV sync threshold */
66
#define AV_SYNC_THRESHOLD 0.01
67
/* no AV correction is done if too big error */
68
#define AV_NOSYNC_THRESHOLD 10.0
69

    
70
/* maximum audio speed change to get correct sync */
71
#define SAMPLE_CORRECTION_PERCENT_MAX 10
72

    
73
/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
74
#define AUDIO_DIFF_AVG_NB   20
75

    
76
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
77
#define SAMPLE_ARRAY_SIZE (2*65536)
78

    
79
static int sws_flags = SWS_BICUBIC;
80

    
81
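/* thread-safe FIFO of demuxed packets, shared between the demuxer and the
   decoder threads; 'size' is the total queued payload in bytes */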
typedef struct PacketQueue {
82
    AVPacketList *first_pkt, *last_pkt;
83
    int nb_packets;
84
    int size;
85
    int abort_request;
86
    SDL_mutex *mutex;
87
    SDL_cond *cond;
88
} PacketQueue;
89

    
90
#define VIDEO_PICTURE_QUEUE_SIZE 1
91
#define SUBPICTURE_QUEUE_SIZE 4
92

    
93
typedef struct VideoPicture {
94
    double pts;                                  ///<presentation time stamp for this picture
95
    SDL_Overlay *bmp;
96
    int width, height; /* source height & width */
97
    int allocated;
98
} VideoPicture;
99

    
100
typedef struct SubPicture {
101
    double pts; /* presentation time stamp for this picture */
102
    AVSubtitle sub;
103
} SubPicture;
104

    
105
enum {
106
    AV_SYNC_AUDIO_MASTER, /* default choice */
107
    AV_SYNC_VIDEO_MASTER,
108
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
109
};
110

    
111
typedef struct VideoState {
112
    SDL_Thread *parse_tid;
113
    SDL_Thread *video_tid;
114
    AVInputFormat *iformat;
115
    int no_background;
116
    int abort_request;
117
    int paused;
118
    int last_paused;
119
    int seek_req;
120
    int seek_flags;
121
    int64_t seek_pos;
122
    AVFormatContext *ic;
123
    int dtg_active_format;
124

    
125
    int audio_stream;
126

    
127
    int av_sync_type;
128
    double external_clock; /* external clock base */
129
    int64_t external_clock_time;
130

    
131
    double audio_clock;
132
    double audio_diff_cum; /* used for AV difference average computation */
133
    double audio_diff_avg_coef;
134
    double audio_diff_threshold;
135
    int audio_diff_avg_count;
136
    AVStream *audio_st;
137
    PacketQueue audioq;
138
    int audio_hw_buf_size;
139
    /* samples output by the codec. we reserve more space for avsync
140
       compensation */
141
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
142
    unsigned int audio_buf_size; /* in bytes */
143
    int audio_buf_index; /* in bytes */
144
    AVPacket audio_pkt;
145
    uint8_t *audio_pkt_data;
146
    int audio_pkt_size;
147

    
148
    int show_audio; /* if true, display audio samples */
149
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
150
    int sample_array_index;
151
    int last_i_start;
152

    
153
    SDL_Thread *subtitle_tid;
154
    int subtitle_stream;
155
    int subtitle_stream_changed;
156
    AVStream *subtitle_st;
157
    PacketQueue subtitleq;
158
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
159
    int subpq_size, subpq_rindex, subpq_windex;
160
    SDL_mutex *subpq_mutex;
161
    SDL_cond *subpq_cond;
162

    
163
    double frame_timer;
164
    double frame_last_pts;
165
    double frame_last_delay;
166
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
167
    int video_stream;
168
    AVStream *video_st;
169
    PacketQueue videoq;
170
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
171
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
172
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
173
    int pictq_size, pictq_rindex, pictq_windex;
174
    SDL_mutex *pictq_mutex;
175
    SDL_cond *pictq_cond;
176

    
177
    //    QETimer *video_timer;
178
    char filename[1024];
179
    int width, height, xleft, ytop;
180
} VideoState;
181

    
182
void show_help(void);
183
static int audio_write_get_buf_size(VideoState *is);
184

    
185
/* options specified by the user */
186
static AVInputFormat *file_iformat;
187
static const char *input_filename;
188
static int fs_screen_width;
189
static int fs_screen_height;
190
static int screen_width = 0;
191
static int screen_height = 0;
192
static int frame_width = 0;
193
static int frame_height = 0;
194
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
195
static int audio_disable;
196
static int video_disable;
197
static int wanted_audio_stream= 0;
198
static int seek_by_bytes;
199
static int display_disable;
200
static int show_status;
201
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
202
static int64_t start_time = AV_NOPTS_VALUE;
203
static int debug = 0;
204
static int debug_mv = 0;
205
static int step = 0;
206
static int thread_count = 1;
207
static int workaround_bugs = 1;
208
static int fast = 0;
209
static int genpts = 0;
210
static int lowres = 0;
211
static int idct = FF_IDCT_AUTO;
212
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
213
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
214
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
215
static int error_resilience = FF_ER_CAREFUL;
216
static int error_concealment = 3;
217
static int decoder_reorder_pts= 0;
218

    
219
/* current context */
220
static int is_full_screen;
221
static VideoState *cur_stream;
222
static int64_t audio_callback_time;
223

    
224
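/* marker packet queued after a seek; the decoder threads detect it by
   pointer identity (pkt->data == flush_pkt.data) and flush their codec
   buffers with avcodec_flush_buffers() */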
AVPacket flush_pkt;
225

    
226
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
227
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
228
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
229

    
230
SDL_Surface *screen;
231

    
232
/* packet queue handling */
233
static void packet_queue_init(PacketQueue *q)
234
{
235
    memset(q, 0, sizeof(PacketQueue));
236
    q->mutex = SDL_CreateMutex();
237
    q->cond = SDL_CreateCond();
238
}
239

    
240
static void packet_queue_flush(PacketQueue *q)
241
{
242
    AVPacketList *pkt, *pkt1;
243

    
244
    SDL_LockMutex(q->mutex);
245
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
246
        pkt1 = pkt->next;
247
        av_free_packet(&pkt->pkt);
248
        av_freep(&pkt);
249
    }
250
    q->last_pkt = NULL;
251
    q->first_pkt = NULL;
252
    q->nb_packets = 0;
253
    q->size = 0;
254
    SDL_UnlockMutex(q->mutex);
255
}
256

    
257
static void packet_queue_end(PacketQueue *q)
258
{
259
    packet_queue_flush(q);
260
    SDL_DestroyMutex(q->mutex);
261
    SDL_DestroyCond(q->cond);
262
}
263

    
264
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
265
{
266
    AVPacketList *pkt1;
267

    
268
    /* duplicate the packet */
269
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
270
        return -1;
271

    
272
    pkt1 = av_malloc(sizeof(AVPacketList));
273
    if (!pkt1)
274
        return -1;
275
    pkt1->pkt = *pkt;
276
    pkt1->next = NULL;
277

    
278

    
279
    SDL_LockMutex(q->mutex);
280

    
281
    if (!q->last_pkt)
282

    
283
        q->first_pkt = pkt1;
284
    else
285
        q->last_pkt->next = pkt1;
286
    q->last_pkt = pkt1;
287
    q->nb_packets++;
288
    q->size += pkt1->pkt.size;
289
    /* XXX: should duplicate packet data in DV case */
290
    SDL_CondSignal(q->cond);
291

    
292
    SDL_UnlockMutex(q->mutex);
293
    return 0;
294
}
295

    
296
static void packet_queue_abort(PacketQueue *q)
297
{
298
    SDL_LockMutex(q->mutex);
299

    
300
    q->abort_request = 1;
301

    
302
    SDL_CondSignal(q->cond);
303

    
304
    SDL_UnlockMutex(q->mutex);
305
}
306

    
307
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
308
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
309
{
310
    AVPacketList *pkt1;
311
    int ret;
312

    
313
    SDL_LockMutex(q->mutex);
314

    
315
    for(;;) {
316
        if (q->abort_request) {
317
            ret = -1;
318
            break;
319
        }
320

    
321
        pkt1 = q->first_pkt;
322
        if (pkt1) {
323
            q->first_pkt = pkt1->next;
324
            if (!q->first_pkt)
325
                q->last_pkt = NULL;
326
            q->nb_packets--;
327
            q->size -= pkt1->pkt.size;
328
            *pkt = pkt1->pkt;
329
            av_free(pkt1);
330
            ret = 1;
331
            break;
332
        } else if (!block) {
333
            ret = 0;
334
            break;
335
        } else {
336
            SDL_CondWait(q->cond, q->mutex);
337
        }
338
    }
339
    SDL_UnlockMutex(q->mutex);
340
    return ret;
341
}
342

    
343
static inline void fill_rectangle(SDL_Surface *screen,
344
                                  int x, int y, int w, int h, int color)
345
{
346
    SDL_Rect rect;
347
    rect.x = x;
348
    rect.y = y;
349
    rect.w = w;
350
    rect.h = h;
351
    SDL_FillRect(screen, &rect, color);
352
}
353

    
354
#if 0
355
/* draw only the border of a rectangle */
356
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
357
{
358
    int w1, w2, h1, h2;
359

360
    /* fill the background */
361
    w1 = x;
362
    if (w1 < 0)
363
        w1 = 0;
364
    w2 = s->width - (x + w);
365
    if (w2 < 0)
366
        w2 = 0;
367
    h1 = y;
368
    if (h1 < 0)
369
        h1 = 0;
370
    h2 = s->height - (y + h);
371
    if (h2 < 0)
372
        h2 = 0;
373
    fill_rectangle(screen,
374
                   s->xleft, s->ytop,
375
                   w1, s->height,
376
                   color);
377
    fill_rectangle(screen,
378
                   s->xleft + s->width - w2, s->ytop,
379
                   w2, s->height,
380
                   color);
381
    fill_rectangle(screen,
382
                   s->xleft + w1, s->ytop,
383
                   s->width - w1 - w2, h1,
384
                   color);
385
    fill_rectangle(screen,
386
                   s->xleft + w1, s->ytop + s->height - h2,
387
                   s->width - w1 - w2, h2,
388
                   color);
389
}
390
#endif
391

    
392

    
393

    
394
#define SCALEBITS 10
395
#define ONE_HALF  (1 << (SCALEBITS - 1))
396
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
397

    
398
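/* fixed-point conversion from full-range RGB to CCIR 601 (limited range)
   YCbCr, used below to convert subtitle palettes; FIX() scales the
   floating-point coefficients by 2^SCALEBITS */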
#define RGB_TO_Y_CCIR(r, g, b) \
399
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
400
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
401

    
402
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
403
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
404
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
405

    
406
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
407
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
408
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
409

    
410
#define ALPHA_BLEND(a, oldp, newp, s)\
411
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
412

    
413
#define RGBA_IN(r, g, b, a, s)\
414
{\
415
    unsigned int v = ((const uint32_t *)(s))[0];\
416
    a = (v >> 24) & 0xff;\
417
    r = (v >> 16) & 0xff;\
418
    g = (v >> 8) & 0xff;\
419
    b = v & 0xff;\
420
}
421

    
422
#define YUVA_IN(y, u, v, a, s, pal)\
423
{\
424
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
425
    a = (val >> 24) & 0xff;\
426
    y = (val >> 16) & 0xff;\
427
    u = (val >> 8) & 0xff;\
428
    v = val & 0xff;\
429
}
430

    
431
#define YUVA_OUT(d, y, u, v, a)\
432
{\
433
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
434
}
435

    
436

    
437
#define BPP 1
438

    
439
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
440
{
441
    int wrap, wrap3, width2, skip2;
442
    int y, u, v, a, u1, v1, a1, w, h;
443
    uint8_t *lum, *cb, *cr;
444
    const uint8_t *p;
445
    const uint32_t *pal;
446

    
447
    lum = dst->data[0] + rect->y * dst->linesize[0];
448
    cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
449
    cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];
450

    
451
    width2 = (rect->w + 1) >> 1;
452
    skip2 = rect->x >> 1;
453
    wrap = dst->linesize[0];
454
    wrap3 = rect->linesize;
455
    p = rect->bitmap;
456
    pal = rect->rgba_palette;  /* Now in YCrCb! */
457

    
458
    if (rect->y & 1) {
459
        lum += rect->x;
460
        cb += skip2;
461
        cr += skip2;
462

    
463
        if (rect->x & 1) {
464
            YUVA_IN(y, u, v, a, p, pal);
465
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468
            cb++;
469
            cr++;
470
            lum++;
471
            p += BPP;
472
        }
473
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
474
            YUVA_IN(y, u, v, a, p, pal);
475
            u1 = u;
476
            v1 = v;
477
            a1 = a;
478
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479

    
480
            YUVA_IN(y, u, v, a, p + BPP, pal);
481
            u1 += u;
482
            v1 += v;
483
            a1 += a;
484
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487
            cb++;
488
            cr++;
489
            p += 2 * BPP;
490
            lum += 2;
491
        }
492
        if (w) {
493
            YUVA_IN(y, u, v, a, p, pal);
494
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
497
        }
498
        p += wrap3 + (wrap3 - rect->w * BPP);
499
        lum += wrap + (wrap - rect->w - rect->x);
500
        cb += dst->linesize[1] - width2 - skip2;
501
        cr += dst->linesize[2] - width2 - skip2;
502
    }
503
    for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
504
        lum += rect->x;
505
        cb += skip2;
506
        cr += skip2;
507

    
508
        if (rect->x & 1) {
509
            YUVA_IN(y, u, v, a, p, pal);
510
            u1 = u;
511
            v1 = v;
512
            a1 = a;
513
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514
            p += wrap3;
515
            lum += wrap;
516
            YUVA_IN(y, u, v, a, p, pal);
517
            u1 += u;
518
            v1 += v;
519
            a1 += a;
520
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523
            cb++;
524
            cr++;
525
            p += -wrap3 + BPP;
526
            lum += -wrap + 1;
527
        }
528
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
529
            YUVA_IN(y, u, v, a, p, pal);
530
            u1 = u;
531
            v1 = v;
532
            a1 = a;
533
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534

    
535
            YUVA_IN(y, u, v, a, p, pal);
536
            u1 += u;
537
            v1 += v;
538
            a1 += a;
539
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540
            p += wrap3;
541
            lum += wrap;
542

    
543
            YUVA_IN(y, u, v, a, p, pal);
544
            u1 += u;
545
            v1 += v;
546
            a1 += a;
547
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548

    
549
            YUVA_IN(y, u, v, a, p, pal);
550
            u1 += u;
551
            v1 += v;
552
            a1 += a;
553
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554

    
555
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557

    
558
            cb++;
559
            cr++;
560
            p += -wrap3 + 2 * BPP;
561
            lum += -wrap + 2;
562
        }
563
        if (w) {
564
            YUVA_IN(y, u, v, a, p, pal);
565
            u1 = u;
566
            v1 = v;
567
            a1 = a;
568
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569
            p += wrap3;
570
            lum += wrap;
571
            YUVA_IN(y, u, v, a, p, pal);
572
            u1 += u;
573
            v1 += v;
574
            a1 += a;
575
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578
            cb++;
579
            cr++;
580
            p += -wrap3 + BPP;
581
            lum += -wrap + 1;
582
        }
583
        p += wrap3 + (wrap3 - rect->w * BPP);
584
        lum += wrap + (wrap - rect->w - rect->x);
585
        cb += dst->linesize[1] - width2 - skip2;
586
        cr += dst->linesize[2] - width2 - skip2;
587
    }
588
    /* handle odd height */
589
    if (h) {
590
        lum += rect->x;
591
        cb += skip2;
592
        cr += skip2;
593

    
594
        if (rect->x & 1) {
595
            YUVA_IN(y, u, v, a, p, pal);
596
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599
            cb++;
600
            cr++;
601
            lum++;
602
            p += BPP;
603
        }
604
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
605
            YUVA_IN(y, u, v, a, p, pal);
606
            u1 = u;
607
            v1 = v;
608
            a1 = a;
609
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610

    
611
            YUVA_IN(y, u, v, a, p + BPP, pal);
612
            u1 += u;
613
            v1 += v;
614
            a1 += a;
615
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
617
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
618
            cb++;
619
            cr++;
620
            p += 2 * BPP;
621
            lum += 2;
622
        }
623
        if (w) {
624
            YUVA_IN(y, u, v, a, p, pal);
625
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628
        }
629
    }
630
}
631

    
632
static void free_subpicture(SubPicture *sp)
633
{
634
    int i;
635

    
636
    for (i = 0; i < sp->sub.num_rects; i++)
637
    {
638
        av_free(sp->sub.rects[i].bitmap);
639
        av_free(sp->sub.rects[i].rgba_palette);
640
    }
641

    
642
    av_free(sp->sub.rects);
643

    
644
    memset(&sp->sub, 0, sizeof(AVSubtitle));
645
}
646

    
647
static void video_image_display(VideoState *is)
648
{
649
    VideoPicture *vp;
650
    SubPicture *sp;
651
    AVPicture pict;
652
    float aspect_ratio;
653
    int width, height, x, y;
654
    SDL_Rect rect;
655
    int i;
656

    
657
    vp = &is->pictq[is->pictq_rindex];
658
    if (vp->bmp) {
659
        /* XXX: use variable in the frame */
660
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
661
            aspect_ratio = 0;
662
        else
663
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
664
                * is->video_st->codec->width / is->video_st->codec->height;
665
        if (aspect_ratio <= 0.0)
666
            aspect_ratio = (float)is->video_st->codec->width /
667
                (float)is->video_st->codec->height;
668
        /* if an active format is indicated, then it overrides the
669
           mpeg format */
670
#if 0
671
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
672
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
673
            printf("dtg_active_format=%d\n", is->dtg_active_format);
674
        }
675
#endif
676
#if 0
677
        switch(is->video_st->codec->dtg_active_format) {
678
        case FF_DTG_AFD_SAME:
679
        default:
680
            /* nothing to do */
681
            break;
682
        case FF_DTG_AFD_4_3:
683
            aspect_ratio = 4.0 / 3.0;
684
            break;
685
        case FF_DTG_AFD_16_9:
686
            aspect_ratio = 16.0 / 9.0;
687
            break;
688
        case FF_DTG_AFD_14_9:
689
            aspect_ratio = 14.0 / 9.0;
690
            break;
691
        case FF_DTG_AFD_4_3_SP_14_9:
692
            aspect_ratio = 14.0 / 9.0;
693
            break;
694
        case FF_DTG_AFD_16_9_SP_14_9:
695
            aspect_ratio = 14.0 / 9.0;
696
            break;
697
        case FF_DTG_AFD_SP_4_3:
698
            aspect_ratio = 4.0 / 3.0;
699
            break;
700
        }
701
#endif
702

    
703
        if (is->subtitle_st)
704
        {
705
            if (is->subpq_size > 0)
706
            {
707
                sp = &is->subpq[is->subpq_rindex];
708

    
709
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
710
                {
711
                    SDL_LockYUVOverlay (vp->bmp);
712

    
713
                    pict.data[0] = vp->bmp->pixels[0];
714
                    pict.data[1] = vp->bmp->pixels[2];
715
                    pict.data[2] = vp->bmp->pixels[1];
716

    
717
                    pict.linesize[0] = vp->bmp->pitches[0];
718
                    pict.linesize[1] = vp->bmp->pitches[2];
719
                    pict.linesize[2] = vp->bmp->pitches[1];
720

    
721
                    for (i = 0; i < sp->sub.num_rects; i++)
722
                        blend_subrect(&pict, &sp->sub.rects[i]);
723

    
724
                    SDL_UnlockYUVOverlay (vp->bmp);
725
                }
726
            }
727
        }
728

    
729

    
730
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
731
        height = is->height;
732
        width = ((int)rint(height * aspect_ratio)) & -3;
733
        if (width > is->width) {
734
            width = is->width;
735
            height = ((int)rint(width / aspect_ratio)) & -3;
736
        }
737
        x = (is->width - width) / 2;
738
        y = (is->height - height) / 2;
739
        if (!is->no_background) {
740
            /* fill the background */
741
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
742
        } else {
743
            is->no_background = 0;
744
        }
745
        rect.x = is->xleft + x;
746
        rect.y = is->ytop  + y;
747
        rect.w = width;
748
        rect.h = height;
749
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
750
    } else {
751
#if 0
752
        fill_rectangle(screen,
753
                       is->xleft, is->ytop, is->width, is->height,
754
                       QERGB(0x00, 0x00, 0x00));
755
#endif
756
    }
757
}
758

    
759
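/* mathematical modulo: unlike C's %, the result is always in [0, b),
   even for negative a */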
static inline int compute_mod(int a, int b)
760
{
761
    a = a % b;
762
    if (a >= 0)
763
        return a;
764
    else
765
        return a + b;
766
}
767

    
768
static void video_audio_display(VideoState *s)
769
{
770
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
771
    int ch, channels, h, h2, bgcolor, fgcolor;
772
    int16_t time_diff;
773

    
774
    /* compute display index : center on currently output samples */
775
    channels = s->audio_st->codec->channels;
776
    nb_display_channels = channels;
777
    if (!s->paused) {
778
        n = 2 * channels;
779
        delay = audio_write_get_buf_size(s);
780
        delay /= n;
781

    
782
        /* to be more precise, we take into account the time spent since
783
           the last buffer computation */
784
        if (audio_callback_time) {
785
            time_diff = av_gettime() - audio_callback_time;
786
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
787
        }
788

    
789
        delay -= s->width / 2;
790
        if (delay < s->width)
791
            delay = s->width;
792

    
793
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
794

    
795
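        /* scan the most recent samples for a point where the waveform
           changes sign ((b^c) < 0) and the level difference a-d is largest,
           so that the display is triggered at a similar phase each refresh */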
        h= INT_MIN;
796
        for(i=0; i<1000; i+=channels){
797
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
798
            int a= s->sample_array[idx];
799
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
800
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
801
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
802
            int score= a-d;
803
            if(h<score && (b^c)<0){
804
                h= score;
805
                i_start= idx;
806
            }
807
        }
808

    
809
        s->last_i_start = i_start;
810
    } else {
811
        i_start = s->last_i_start;
812
    }
813

    
814
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
815
    fill_rectangle(screen,
816
                   s->xleft, s->ytop, s->width, s->height,
817
                   bgcolor);
818

    
819
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
820

    
821
    /* total height for one channel */
822
    h = s->height / nb_display_channels;
823
    /* graph height / 2 */
824
    h2 = (h * 9) / 20;
825
    for(ch = 0;ch < nb_display_channels; ch++) {
826
        i = i_start + ch;
827
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
828
        for(x = 0; x < s->width; x++) {
829
            y = (s->sample_array[i] * h2) >> 15;
830
            if (y < 0) {
831
                y = -y;
832
                ys = y1 - y;
833
            } else {
834
                ys = y1;
835
            }
836
            fill_rectangle(screen,
837
                           s->xleft + x, ys, 1, y,
838
                           fgcolor);
839
            i += channels;
840
            if (i >= SAMPLE_ARRAY_SIZE)
841
                i -= SAMPLE_ARRAY_SIZE;
842
        }
843
    }
844

    
845
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
846

    
847
    for(ch = 1;ch < nb_display_channels; ch++) {
848
        y = s->ytop + ch * h;
849
        fill_rectangle(screen,
850
                       s->xleft, y, s->width, 1,
851
                       fgcolor);
852
    }
853
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
854
}
855

    
856
static int video_open(VideoState *is){
857
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
858
    int w,h;
859

    
860
    if(is_full_screen) flags |= SDL_FULLSCREEN;
861
    else               flags |= SDL_RESIZABLE;
862

    
863
    if (is_full_screen && fs_screen_width) {
864
        w = fs_screen_width;
865
        h = fs_screen_height;
866
    } else if(!is_full_screen && screen_width){
867
        w = screen_width;
868
        h = screen_height;
869
    }else if (is->video_st && is->video_st->codec->width){
870
        w = is->video_st->codec->width;
871
        h = is->video_st->codec->height;
872
    } else {
873
        w = 640;
874
        h = 480;
875
    }
876
#ifndef CONFIG_DARWIN
877
    screen = SDL_SetVideoMode(w, h, 0, flags);
878
#else
879
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
880
    screen = SDL_SetVideoMode(w, h, 24, flags);
881
#endif
882
    if (!screen) {
883
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
884
        return -1;
885
    }
886
    SDL_WM_SetCaption("FFplay", "FFplay");
887

    
888
    is->width = screen->w;
889
    is->height = screen->h;
890

    
891
    return 0;
892
}
893

    
894
/* display the current picture, if any */
895
static void video_display(VideoState *is)
896
{
897
    if(!screen)
898
        video_open(cur_stream);
899
    if (is->audio_st && is->show_audio)
900
        video_audio_display(is);
901
    else if (is->video_st)
902
        video_image_display(is);
903
}
904

    
905
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
906
{
907
    SDL_Event event;
908
    event.type = FF_REFRESH_EVENT;
909
    event.user.data1 = opaque;
910
    SDL_PushEvent(&event);
911
    return 0; /* 0 means stop timer */
912
}
913

    
914
/* schedule a video refresh in 'delay' ms */
915
static void schedule_refresh(VideoState *is, int delay)
916
{
917
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
918
}
919

    
920
/* get the current audio clock value */
921
static double get_audio_clock(VideoState *is)
922
{
923
    double pts;
924
    int hw_buf_size, bytes_per_sec;
925
    pts = is->audio_clock;
926
    hw_buf_size = audio_write_get_buf_size(is);
927
    bytes_per_sec = 0;
928
    if (is->audio_st) {
929
        bytes_per_sec = is->audio_st->codec->sample_rate *
930
            2 * is->audio_st->codec->channels;
931
    }
932
    if (bytes_per_sec)
933
        pts -= (double)hw_buf_size / bytes_per_sec;
934
    return pts;
935
}
936

    
937
/* get the current video clock value */
938
static double get_video_clock(VideoState *is)
939
{
940
    double delta;
941
    if (is->paused) {
942
        delta = 0;
943
    } else {
944
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
945
    }
946
    return is->video_current_pts + delta;
947
}
948

    
949
/* get the current external clock value */
950
static double get_external_clock(VideoState *is)
951
{
952
    int64_t ti;
953
    ti = av_gettime();
954
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
955
}
956

    
957
/* get the current master clock value */
958
static double get_master_clock(VideoState *is)
959
{
960
    double val;
961

    
962
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
963
        if (is->video_st)
964
            val = get_video_clock(is);
965
        else
966
            val = get_audio_clock(is);
967
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
968
        if (is->audio_st)
969
            val = get_audio_clock(is);
970
        else
971
            val = get_video_clock(is);
972
    } else {
973
        val = get_external_clock(is);
974
    }
975
    return val;
976
}
977

    
978
/* seek in the stream */
979
static void stream_seek(VideoState *is, int64_t pos, int rel)
980
{
981
    if (!is->seek_req) {
982
        is->seek_pos = pos;
983
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
984
        if (seek_by_bytes)
985
            is->seek_flags |= AVSEEK_FLAG_BYTE;
986
        is->seek_req = 1;
987
    }
988
}
989

    
990
/* pause or resume the video */
991
static void stream_pause(VideoState *is)
992
{
993
    is->paused = !is->paused;
994
    if (!is->paused) {
995
        is->video_current_pts = get_video_clock(is);
996
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
997
    }
998
}
999

    
1000
/* called to display each frame */
1001
static void video_refresh_timer(void *opaque)
1002
{
1003
    VideoState *is = opaque;
1004
    VideoPicture *vp;
1005
    double actual_delay, delay, sync_threshold, ref_clock, diff;
1006

    
1007
    SubPicture *sp, *sp2;
1008

    
1009
    if (is->video_st) {
1010
        if (is->pictq_size == 0) {
1011
            /* if no picture, need to wait */
1012
            schedule_refresh(is, 1);
1013
        } else {
1014
            /* dequeue the picture */
1015
            vp = &is->pictq[is->pictq_rindex];
1016

    
1017
            /* update current video pts */
1018
            is->video_current_pts = vp->pts;
1019
            is->video_current_pts_time = av_gettime();
1020

    
1021
            /* compute nominal delay */
1022
            delay = vp->pts - is->frame_last_pts;
1023
            if (delay <= 0 || delay >= 1.0) {
1024
                /* if incorrect delay, use previous one */
1025
                delay = is->frame_last_delay;
1026
            }
1027
            is->frame_last_delay = delay;
1028
            is->frame_last_pts = vp->pts;
1029

    
1030
            /* update delay to follow master synchronisation source */
1031
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1032
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1033
                /* if video is slave, we try to correct big delays by
1034
                   duplicating or deleting a frame */
1035
                ref_clock = get_master_clock(is);
1036
                diff = vp->pts - ref_clock;
1037

    
1038
                /* skip or repeat frame. We take into account the
1039
                   delay to compute the threshold. I still don't know
1040
                   if it is the best guess */
1041
                sync_threshold = AV_SYNC_THRESHOLD;
1042
                if (delay > sync_threshold)
1043
                    sync_threshold = delay;
1044
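                /* if the video lags the master clock by more than the
                   threshold, show the frame immediately (delay = 0); if it
                   is ahead, wait roughly one extra frame (delay doubled) */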
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1045
                    if (diff <= -sync_threshold)
1046
                        delay = 0;
1047
                    else if (diff >= sync_threshold)
1048
                        delay = 2 * delay;
1049
                }
1050
            }
1051

    
1052
            is->frame_timer += delay;
1053
            /* compute the REAL delay (we need to do that to avoid
1054
               long-term errors) */
1055
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1056
            if (actual_delay < 0.010) {
1057
                /* XXX: should skip picture */
1058
                actual_delay = 0.010;
1059
            }
1060
            /* launch timer for next picture */
1061
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1062

    
1063
#if defined(DEBUG_SYNC)
1064
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1065
                   delay, actual_delay, vp->pts, -diff);
1066
#endif
1067

    
1068
            if(is->subtitle_st) {
1069
                if (is->subtitle_stream_changed) {
1070
                    SDL_LockMutex(is->subpq_mutex);
1071

    
1072
                    while (is->subpq_size) {
1073
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1074

    
1075
                        /* update queue size and signal for next picture */
1076
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1077
                            is->subpq_rindex = 0;
1078

    
1079
                        is->subpq_size--;
1080
                    }
1081
                    is->subtitle_stream_changed = 0;
1082

    
1083
                    SDL_CondSignal(is->subpq_cond);
1084
                    SDL_UnlockMutex(is->subpq_mutex);
1085
                } else {
1086
                    if (is->subpq_size > 0) {
1087
                        sp = &is->subpq[is->subpq_rindex];
1088

    
1089
                        if (is->subpq_size > 1)
1090
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1091
                        else
1092
                            sp2 = NULL;
1093

    
1094
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1095
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1096
                        {
1097
                            free_subpicture(sp);
1098

    
1099
                            /* update queue size and signal for next picture */
1100
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1101
                                is->subpq_rindex = 0;
1102

    
1103
                            SDL_LockMutex(is->subpq_mutex);
1104
                            is->subpq_size--;
1105
                            SDL_CondSignal(is->subpq_cond);
1106
                            SDL_UnlockMutex(is->subpq_mutex);
1107
                        }
1108
                    }
1109
                }
1110
            }
1111

    
1112
            /* display picture */
1113
            video_display(is);
1114

    
1115
            /* update queue size and signal for next picture */
1116
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1117
                is->pictq_rindex = 0;
1118

    
1119
            SDL_LockMutex(is->pictq_mutex);
1120
            is->pictq_size--;
1121
            SDL_CondSignal(is->pictq_cond);
1122
            SDL_UnlockMutex(is->pictq_mutex);
1123
        }
1124
    } else if (is->audio_st) {
1125
        /* draw the next audio frame */
1126

    
1127
        schedule_refresh(is, 40);
1128

    
1129
        /* if only audio stream, then display the audio bars (better
1130
           than nothing, just to test the implementation */
1131

    
1132
        /* display picture */
1133
        video_display(is);
1134
    } else {
1135
        schedule_refresh(is, 100);
1136
    }
1137
    if (show_status) {
1138
        static int64_t last_time;
1139
        int64_t cur_time;
1140
        int aqsize, vqsize, sqsize;
1141
        double av_diff;
1142

    
1143
        cur_time = av_gettime();
1144
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1145
            aqsize = 0;
1146
            vqsize = 0;
1147
            sqsize = 0;
1148
            if (is->audio_st)
1149
                aqsize = is->audioq.size;
1150
            if (is->video_st)
1151
                vqsize = is->videoq.size;
1152
            if (is->subtitle_st)
1153
                sqsize = is->subtitleq.size;
1154
            av_diff = 0;
1155
            if (is->audio_st && is->video_st)
1156
                av_diff = get_audio_clock(is) - get_video_clock(is);
1157
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1158
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1159
            fflush(stdout);
1160
            last_time = cur_time;
1161
        }
1162
    }
1163
}
1164

    
1165
/* allocate a picture (this must be done in the main thread to avoid
1166
   potential locking problems) */
1167
static void alloc_picture(void *opaque)
1168
{
1169
    VideoState *is = opaque;
1170
    VideoPicture *vp;
1171

    
1172
    vp = &is->pictq[is->pictq_windex];
1173

    
1174
    if (vp->bmp)
1175
        SDL_FreeYUVOverlay(vp->bmp);
1176

    
1177
#if 0
1178
    /* XXX: use generic function */
1179
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
1180
    switch(is->video_st->codec->pix_fmt) {
1181
    case PIX_FMT_YUV420P:
1182
    case PIX_FMT_YUV422P:
1183
    case PIX_FMT_YUV444P:
1184
    case PIX_FMT_YUYV422:
1185
    case PIX_FMT_YUV410P:
1186
    case PIX_FMT_YUV411P:
1187
        is_yuv = 1;
1188
        break;
1189
    default:
1190
        is_yuv = 0;
1191
        break;
1192
    }
1193
#endif
1194
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1195
                                   is->video_st->codec->height,
1196
                                   SDL_YV12_OVERLAY,
1197
                                   screen);
1198
    vp->width = is->video_st->codec->width;
1199
    vp->height = is->video_st->codec->height;
1200

    
1201
    SDL_LockMutex(is->pictq_mutex);
1202
    vp->allocated = 1;
1203
    SDL_CondSignal(is->pictq_cond);
1204
    SDL_UnlockMutex(is->pictq_mutex);
1205
}
1206

    
1207
/**
1208
 *
1209
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1210
 */
1211
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1212
{
1213
    VideoPicture *vp;
1214
    int dst_pix_fmt;
1215
    AVPicture pict;
1216
    static struct SwsContext *img_convert_ctx;
1217

    
1218
    /* wait until we have space to put a new picture */
1219
    SDL_LockMutex(is->pictq_mutex);
1220
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1221
           !is->videoq.abort_request) {
1222
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1223
    }
1224
    SDL_UnlockMutex(is->pictq_mutex);
1225

    
1226
    if (is->videoq.abort_request)
1227
        return -1;
1228

    
1229
    vp = &is->pictq[is->pictq_windex];
1230

    
1231
    /* alloc or resize hardware picture buffer */
1232
    if (!vp->bmp ||
1233
        vp->width != is->video_st->codec->width ||
1234
        vp->height != is->video_st->codec->height) {
1235
        SDL_Event event;
1236

    
1237
        vp->allocated = 0;
1238

    
1239
        /* the allocation must be done in the main thread to avoid
1240
           locking problems */
1241
        event.type = FF_ALLOC_EVENT;
1242
        event.user.data1 = is;
1243
        SDL_PushEvent(&event);
1244

    
1245
        /* wait until the picture is allocated */
1246
        SDL_LockMutex(is->pictq_mutex);
1247
        while (!vp->allocated && !is->videoq.abort_request) {
1248
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1249
        }
1250
        SDL_UnlockMutex(is->pictq_mutex);
1251

    
1252
        if (is->videoq.abort_request)
1253
            return -1;
1254
    }
1255

    
1256
    /* if the frame is not skipped, then display it */
1257
    if (vp->bmp) {
1258
        /* get a pointer on the bitmap */
1259
        SDL_LockYUVOverlay (vp->bmp);
1260

    
1261
        dst_pix_fmt = PIX_FMT_YUV420P;
1262
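        /* the SDL YV12 overlay stores the V plane before the U plane, so
           planes 1 and 2 are swapped relative to PIX_FMT_YUV420P */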
        pict.data[0] = vp->bmp->pixels[0];
1263
        pict.data[1] = vp->bmp->pixels[2];
1264
        pict.data[2] = vp->bmp->pixels[1];
1265

    
1266
        pict.linesize[0] = vp->bmp->pitches[0];
1267
        pict.linesize[1] = vp->bmp->pitches[2];
1268
        pict.linesize[2] = vp->bmp->pitches[1];
1269
        if (img_convert_ctx == NULL) {
1270
            img_convert_ctx = sws_getContext(is->video_st->codec->width,
1271
                    is->video_st->codec->height, is->video_st->codec->pix_fmt,
1272
                    is->video_st->codec->width, is->video_st->codec->height,
1273
                    dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1274
            if (img_convert_ctx == NULL) {
1275
                fprintf(stderr, "Cannot initialize the conversion context\n");
1276
                exit(1);
1277
            }
1278
        }
1279
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1280
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1281
        /* update the bitmap content */
1282
        SDL_UnlockYUVOverlay(vp->bmp);
1283

    
1284
        vp->pts = pts;
1285

    
1286
        /* now we can update the picture count */
1287
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1288
            is->pictq_windex = 0;
1289
        SDL_LockMutex(is->pictq_mutex);
1290
        is->pictq_size++;
1291
        SDL_UnlockMutex(is->pictq_mutex);
1292
    }
1293
    return 0;
1294
}
1295

    
1296
/**
1297
 * compute the exact PTS for the picture if it is omitted in the stream
1298
 * @param pts1 the dts of the pkt / pts of the frame
1299
 */
1300
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1301
{
1302
    double frame_delay, pts;
1303

    
1304
    pts = pts1;
1305

    
1306
    if (pts != 0) {
1307
        /* update video clock with pts, if present */
1308
        is->video_clock = pts;
1309
    } else {
1310
        pts = is->video_clock;
1311
    }
1312
    /* update video clock for next frame */
1313
    frame_delay = av_q2d(is->video_st->codec->time_base);
1314
    /* for MPEG2, the frame can be repeated, so we update the
1315
       clock accordingly */
1316
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1317
    is->video_clock += frame_delay;
1318

    
1319
#if defined(DEBUG_SYNC) && 0
1320
    {
1321
        int ftype;
1322
        if (src_frame->pict_type == FF_B_TYPE)
1323
            ftype = 'B';
1324
        else if (src_frame->pict_type == FF_I_TYPE)
1325
            ftype = 'I';
1326
        else
1327
            ftype = 'P';
1328
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1329
               ftype, pts, pts1);
1330
    }
1331
#endif
1332
    return queue_picture(is, src_frame, pts);
1333
}
1334

    
1335
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1336

    
1337
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1338
    int ret= avcodec_default_get_buffer(c, pic);
1339
    uint64_t *pts= av_malloc(sizeof(uint64_t));
1340
    *pts= global_video_pkt_pts;
1341
    pic->opaque= pts;
1342
    return ret;
1343
}
1344

    
1345
static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1346
    if(pic) av_freep(&pic->opaque);
1347
    avcodec_default_release_buffer(c, pic);
1348
}
1349

    
1350
static int video_thread(void *arg)
1351
{
1352
    VideoState *is = arg;
1353
    AVPacket pkt1, *pkt = &pkt1;
1354
    int len1, got_picture;
1355
    AVFrame *frame= avcodec_alloc_frame();
1356
    double pts;
1357

    
1358
    for(;;) {
1359
        while (is->paused && !is->videoq.abort_request) {
1360
            SDL_Delay(10);
1361
        }
1362
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1363
            break;
1364

    
1365
        if(pkt->data == flush_pkt.data){
1366
            avcodec_flush_buffers(is->video_st->codec);
1367
            continue;
1368
        }
1369

    
1370
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1371
           this packet, if any */
1372
        global_video_pkt_pts= pkt->pts;
1373
        len1 = avcodec_decode_video(is->video_st->codec,
1374
                                    frame, &got_picture,
1375
                                    pkt->data, pkt->size);
1376

    
1377
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1378
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1379
            pts= *(uint64_t*)frame->opaque;
1380
        else if(pkt->dts != AV_NOPTS_VALUE)
1381
            pts= pkt->dts;
1382
        else
1383
            pts= 0;
1384
        pts *= av_q2d(is->video_st->time_base);
1385

    
1386
//            if (len1 < 0)
1387
//                break;
1388
        if (got_picture) {
1389
            if (output_picture2(is, frame, pts) < 0)
1390
                goto the_end;
1391
        }
1392
        av_free_packet(pkt);
1393
        if (step)
1394
            if (cur_stream)
1395
                stream_pause(cur_stream);
1396
    }
1397
 the_end:
1398
    av_free(frame);
1399
    return 0;
1400
}
1401

    
1402
static int subtitle_thread(void *arg)
1403
{
1404
    VideoState *is = arg;
1405
    SubPicture *sp;
1406
    AVPacket pkt1, *pkt = &pkt1;
1407
    int len1, got_subtitle;
1408
    double pts;
1409
    int i, j;
1410
    int r, g, b, y, u, v, a;
1411

    
1412
    for(;;) {
1413
        while (is->paused && !is->subtitleq.abort_request) {
1414
            SDL_Delay(10);
1415
        }
1416
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1417
            break;
1418

    
1419
        if(pkt->data == flush_pkt.data){
1420
            avcodec_flush_buffers(is->subtitle_st->codec);
1421
            continue;
1422
        }
1423
        SDL_LockMutex(is->subpq_mutex);
1424
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1425
               !is->subtitleq.abort_request) {
1426
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1427
        }
1428
        SDL_UnlockMutex(is->subpq_mutex);
1429

    
1430
        if (is->subtitleq.abort_request)
1431
            goto the_end;
1432

    
1433
        sp = &is->subpq[is->subpq_windex];
1434

    
1435
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1436
           this packet, if any */
1437
        pts = 0;
1438
        if (pkt->pts != AV_NOPTS_VALUE)
1439
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1440

    
1441
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1442
                                    &sp->sub, &got_subtitle,
1443
                                    pkt->data, pkt->size);
1444
//            if (len1 < 0)
1445
//                break;
1446
        if (got_subtitle && sp->sub.format == 0) {
1447
            sp->pts = pts;
1448

    
1449
            for (i = 0; i < sp->sub.num_rects; i++)
1450
            {
1451
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1452
                {
1453
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1454
                    y = RGB_TO_Y_CCIR(r, g, b);
1455
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1456
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1457
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1458
                }
1459
            }
1460

    
1461
            /* now we can update the picture count */
1462
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1463
                is->subpq_windex = 0;
1464
            SDL_LockMutex(is->subpq_mutex);
1465
            is->subpq_size++;
1466
            SDL_UnlockMutex(is->subpq_mutex);
1467
        }
1468
        av_free_packet(pkt);
1469
//        if (step)
1470
//            if (cur_stream)
1471
//                stream_pause(cur_stream);
1472
    }
1473
 the_end:
1474
    return 0;
1475
}
1476

    
1477
/* copy samples for viewing in the audio display window */
1478
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1479
{
1480
    int size, len, channels;
1481

    
1482
    channels = is->audio_st->codec->channels;
1483

    
1484
    size = samples_size / sizeof(short);
1485
    while (size > 0) {
1486
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1487
        if (len > size)
1488
            len = size;
1489
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1490
        samples += len;
1491
        is->sample_array_index += len;
1492
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1493
            is->sample_array_index = 0;
1494
        size -= len;
1495
    }
1496
}
1497

    
1498
/* return the new audio buffer size (samples can be added or deleted
1499
   to get better sync if video or external master clock) */
1500
static int synchronize_audio(VideoState *is, short *samples,
1501
                             int samples_size1, double pts)
1502
{
1503
    int n, samples_size;
1504
    double ref_clock;
1505

    
1506
    n = 2 * is->audio_st->codec->channels;
1507
    samples_size = samples_size1;
1508

    
1509
    /* if not master, then we try to remove or add samples to correct the clock */
1510
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1511
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1512
        double diff, avg_diff;
1513
        int wanted_size, min_size, max_size, nb_samples;
1514

    
1515
        ref_clock = get_master_clock(is);
1516
        diff = get_audio_clock(is) - ref_clock;
1517

    
1518
        if (diff < AV_NOSYNC_THRESHOLD) {
1519
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1520
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1521
                /* not enough measures to have a correct estimate */
1522
                is->audio_diff_avg_count++;
1523
            } else {
1524
                /* estimate the A-V difference */
1525
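                /* audio_diff_cum is a geometric sum of past differences
                   (cum = diff + coef * cum); multiplying by (1 - coef)
                   normalizes it into a weighted average */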
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1526

    
1527
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1528
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1529
                    nb_samples = samples_size / n;
1530

    
1531
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1532
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1533
                    if (wanted_size < min_size)
1534
                        wanted_size = min_size;
1535
                    else if (wanted_size > max_size)
1536
                        wanted_size = max_size;
1537

    
1538
                    /* add or remove samples to correct the sync */
1539
                    if (wanted_size < samples_size) {
1540
                        /* remove samples */
1541
                        samples_size = wanted_size;
1542
                    } else if (wanted_size > samples_size) {
1543
                        uint8_t *samples_end, *q;
1544
                        int nb;
1545

    
1546
                        /* add samples */
1547
                        nb = (samples_size - wanted_size);
1548
                        samples_end = (uint8_t *)samples + samples_size - n;
1549
                        q = samples_end + n;
1550
                        while (nb > 0) {
1551
                            memcpy(q, samples_end, n);
1552
                            q += n;
1553
                            nb -= n;
1554
                        }
1555
                        samples_size = wanted_size;
1556
                    }
1557
                }
1558
#if 0
1559
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1560
                       diff, avg_diff, samples_size - samples_size1,
1561
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1562
#endif
1563
            }
1564
        } else {
1565
            /* too big difference : may be initial PTS errors, so
1566
               reset A-V filter */
1567
            is->audio_diff_avg_count = 0;
1568
            is->audio_diff_cum = 0;
1569
        }
1570
    }
1571

    
1572
    return samples_size;
1573
}
1574

    
1575
/* decode one audio frame and return its uncompressed size */
1576
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1577
{
1578
    AVPacket *pkt = &is->audio_pkt;
1579
    int n, len1, data_size;
1580
    double pts;
1581

    
1582
    for(;;) {
1583
        /* NOTE: the audio packet can contain several frames */
1584
        while (is->audio_pkt_size > 0) {
1585
            data_size = buf_size;
1586
            len1 = avcodec_decode_audio2(is->audio_st->codec,
1587
                                        (int16_t *)audio_buf, &data_size,
1588
                                        is->audio_pkt_data, is->audio_pkt_size);
1589
            if (len1 < 0) {
1590
                /* if error, we skip the frame */
1591
                is->audio_pkt_size = 0;
1592
                break;
1593
            }
1594

    
1595
            is->audio_pkt_data += len1;
1596
            is->audio_pkt_size -= len1;
1597
            if (data_size <= 0)
1598
                continue;
1599
            /* if no pts, then compute it */
1600
            pts = is->audio_clock;
1601
            *pts_ptr = pts;
1602
            n = 2 * is->audio_st->codec->channels;
1603
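            /* advance the audio clock by the duration of the decoded data
               (n bytes per sample frame at the codec sample rate) */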
            is->audio_clock += (double)data_size /
1604
                (double)(n * is->audio_st->codec->sample_rate);
1605
#if defined(DEBUG_SYNC)
1606
            {
1607
                static double last_clock;
1608
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1609
                       is->audio_clock - last_clock,
1610
                       is->audio_clock, pts);
1611
                last_clock = is->audio_clock;
1612
            }
1613
#endif
1614
            return data_size;
1615
        }
1616

    
1617
        /* free the current packet */
1618
        if (pkt->data)
1619
            av_free_packet(pkt);
1620

    
1621
        if (is->paused || is->audioq.abort_request) {
1622
            return -1;
1623
        }
1624

    
1625
        /* read next packet */
1626
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1627
            return -1;
1628
        if(pkt->data == flush_pkt.data){
1629
            avcodec_flush_buffers(is->audio_st->codec);
1630
            continue;
1631
        }
1632

    
1633
        is->audio_pkt_data = pkt->data;
1634
        is->audio_pkt_size = pkt->size;
1635

    
1636
        /* update the audio clock with the packet pts, if available */
1637
        if (pkt->pts != AV_NOPTS_VALUE) {
1638
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1639
        }
1640
    }
1641
}
1642

    
1643
/* get the current audio output buffer size, in bytes. With SDL, we
1644
   cannot get precise information */
1645
static int audio_write_get_buf_size(VideoState *is)
1646
{
1647
    return is->audio_buf_size - is->audio_buf_index;
1648
}
1649

    
1650

    
1651
/* prepare a new audio buffer */
1652
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1653
{
1654
    VideoState *is = opaque;
1655
    int audio_size, len1;
1656
    double pts;
1657

    
1658
    audio_callback_time = av_gettime();
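    /* SDL normally invokes this callback from its own audio thread each time
       the device needs more data; len is the number of bytes to provide */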
1659

    
1660
    while (len > 0) {
1661
        if (is->audio_buf_index >= is->audio_buf_size) {
1662
           audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1663
           if (audio_size < 0) {
1664
                /* if error, just output silence */
1665
               is->audio_buf_size = 1024;
1666
               memset(is->audio_buf, 0, is->audio_buf_size);
1667
           } else {
1668
               if (is->show_audio)
1669
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1670
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1671
                                              pts);
1672
               is->audio_buf_size = audio_size;
1673
           }
1674
           is->audio_buf_index = 0;
1675
        }
1676
        len1 = is->audio_buf_size - is->audio_buf_index;
1677
        if (len1 > len)
1678
            len1 = len;
1679
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1680
        len -= len1;
1681
        stream += len1;
1682
        is->audio_buf_index += len1;
1683
    }
1684
}
1685

    
1686
/* open a given stream. Return 0 if OK */
1687
static int stream_component_open(VideoState *is, int stream_index)
1688
{
1689
    AVFormatContext *ic = is->ic;
1690
    AVCodecContext *enc;
1691
    AVCodec *codec;
1692
    SDL_AudioSpec wanted_spec, spec;
1693

    
1694
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1695
        return -1;
1696
    enc = ic->streams[stream_index]->codec;
1697

    
1698
    /* prepare audio output */
1699
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1700
        wanted_spec.freq = enc->sample_rate;
1701
        wanted_spec.format = AUDIO_S16SYS;
1702
        /* hack for AC3: request at most 2 output channels. XXX: remove this */
1703
        if (enc->channels > 2)
1704
            enc->channels = 2;
1705
        wanted_spec.channels = enc->channels;
1706
        wanted_spec.silence = 0;
1707
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1708
        wanted_spec.callback = sdl_audio_callback;
1709
        wanted_spec.userdata = is;
1710
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1711
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1712
            return -1;
1713
        }
1714
        is->audio_hw_buf_size = spec.size;
1715
    }
1716

    
1717
    codec = avcodec_find_decoder(enc->codec_id);
1718
    enc->debug_mv = debug_mv;
1719
    enc->debug = debug;
1720
    enc->workaround_bugs = workaround_bugs;
1721
    enc->lowres = lowres;
1722
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1723
    enc->idct_algo= idct;
1724
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1725
    enc->skip_frame= skip_frame;
1726
    enc->skip_idct= skip_idct;
1727
    enc->skip_loop_filter= skip_loop_filter;
1728
    enc->error_resilience= error_resilience;
1729
    enc->error_concealment= error_concealment;
1730
    if (!codec ||
1731
        avcodec_open(enc, codec) < 0)
1732
        return -1;
1733
    if(thread_count>1)
1734
        avcodec_thread_init(enc, thread_count);
1735
    enc->thread_count= thread_count;
1736
    switch(enc->codec_type) {
1737
    case CODEC_TYPE_AUDIO:
1738
        is->audio_stream = stream_index;
1739
        is->audio_st = ic->streams[stream_index];
1740
        is->audio_buf_size = 0;
1741
        is->audio_buf_index = 0;
1742

    
1743
        /* init averaging filter */
1744
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
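        /* exp(log(0.01) / AUDIO_DIFF_AVG_NB) is a decay factor c with
           c^AUDIO_DIFF_AVG_NB == 0.01, i.e. a measured A-V difference is
           down-weighted to about 1% after AUDIO_DIFF_AVG_NB newer samples */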
1745
        is->audio_diff_avg_count = 0;
1746
        /* since we do not have a precise enough measure of the audio fifo fullness,
1747
           we correct the audio sync only if the error is larger than this threshold */
1748
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
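        /* e.g. at 44100 Hz, with the 1024-sample SDL buffer requested above,
           this threshold is 2 * 1024 / 44100 ~= 46 ms */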
1749

    
1750
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1751
        packet_queue_init(&is->audioq);
1752
        SDL_PauseAudio(0);
1753
        break;
1754
    case CODEC_TYPE_VIDEO:
1755
        is->video_stream = stream_index;
1756
        is->video_st = ic->streams[stream_index];
1757

    
1758
        is->frame_last_delay = 40e-3;
1759
        is->frame_timer = (double)av_gettime() / 1000000.0;
1760
        is->video_current_pts_time = av_gettime();
1761

    
1762
        packet_queue_init(&is->videoq);
1763
        is->video_tid = SDL_CreateThread(video_thread, is);
1764

    
1765
        enc->get_buffer = my_get_buffer;
1766
        enc->release_buffer= my_release_buffer;
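        /* my_get_buffer()/my_release_buffer() are the wrappers defined earlier
           in this file; presumably they are installed so that each decoded
           frame can carry the pts of the packet it came from */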
1767
        break;
1768
    case CODEC_TYPE_SUBTITLE:
1769
        is->subtitle_stream = stream_index;
1770
        is->subtitle_st = ic->streams[stream_index];
1771
        packet_queue_init(&is->subtitleq);
1772

    
1773
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1774
        break;
1775
    default:
1776
        break;
1777
    }
1778
    return 0;
1779
}
1780

    
1781
static void stream_component_close(VideoState *is, int stream_index)
1782
{
1783
    AVFormatContext *ic = is->ic;
1784
    AVCodecContext *enc;
1785

    
1786
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1787
        return;
1788
    enc = ic->streams[stream_index]->codec;
1789

    
1790
    switch(enc->codec_type) {
1791
    case CODEC_TYPE_AUDIO:
1792
        packet_queue_abort(&is->audioq);
1793

    
1794
        SDL_CloseAudio();
1795

    
1796
        packet_queue_end(&is->audioq);
1797
        break;
1798
    case CODEC_TYPE_VIDEO:
1799
        packet_queue_abort(&is->videoq);
1800

    
1801
        /* note: we also signal this mutex to make sure we unblock the
1802
           video thread in all cases */
1803
        SDL_LockMutex(is->pictq_mutex);
1804
        SDL_CondSignal(is->pictq_cond);
1805
        SDL_UnlockMutex(is->pictq_mutex);
1806

    
1807
        SDL_WaitThread(is->video_tid, NULL);
1808

    
1809
        packet_queue_end(&is->videoq);
1810
        break;
1811
    case CODEC_TYPE_SUBTITLE:
1812
        packet_queue_abort(&is->subtitleq);
1813

    
1814
        /* note: we also signal this mutex to make sure we unblock the
1815
           subtitle thread in all cases */
1816
        SDL_LockMutex(is->subpq_mutex);
1817
        is->subtitle_stream_changed = 1;
1818

    
1819
        SDL_CondSignal(is->subpq_cond);
1820
        SDL_UnlockMutex(is->subpq_mutex);
1821

    
1822
        SDL_WaitThread(is->subtitle_tid, NULL);
1823

    
1824
        packet_queue_end(&is->subtitleq);
1825
        break;
1826
    default:
1827
        break;
1828
    }
1829

    
1830
    avcodec_close(enc);
1831
    switch(enc->codec_type) {
1832
    case CODEC_TYPE_AUDIO:
1833
        is->audio_st = NULL;
1834
        is->audio_stream = -1;
1835
        break;
1836
    case CODEC_TYPE_VIDEO:
1837
        is->video_st = NULL;
1838
        is->video_stream = -1;
1839
        break;
1840
    case CODEC_TYPE_SUBTITLE:
1841
        is->subtitle_st = NULL;
1842
        is->subtitle_stream = -1;
1843
        break;
1844
    default:
1845
        break;
1846
    }
1847
}
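
/* note on the teardown order above: the packet queue is aborted first so the
   decoder thread stops blocking in packet_queue_get(), the thread is then
   joined, and only afterwards is the codec context closed */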
1848

    
1849
static void dump_stream_info(const AVFormatContext *s)
1850
{
1851
    if (s->track != 0)
1852
        fprintf(stderr, "Track: %d\n", s->track);
1853
    if (s->title[0] != '\0')
1854
        fprintf(stderr, "Title: %s\n", s->title);
1855
    if (s->author[0] != '\0')
1856
        fprintf(stderr, "Author: %s\n", s->author);
1857
    if (s->copyright[0] != '\0')
1858
        fprintf(stderr, "Copyright: %s\n", s->copyright);
1859
    if (s->comment[0] != '\0')
1860
        fprintf(stderr, "Comment: %s\n", s->comment);
1861
    if (s->album[0] != '\0')
1862
        fprintf(stderr, "Album: %s\n", s->album);
1863
    if (s->year != 0)
1864
        fprintf(stderr, "Year: %d\n", s->year);
1865
    if (s->genre[0] != '\0')
1866
        fprintf(stderr, "Genre: %s\n", s->genre);
1867
}
1868

    
1869
/* since we have only one decoding thread, we can use a global
1870
   variable instead of a thread local variable */
1871
static VideoState *global_video_state;
1872

    
1873
static int decode_interrupt_cb(void)
1874
{
1875
    return (global_video_state && global_video_state->abort_request);
1876
}
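
/* registered below with url_set_interrupt_cb(); libavformat checks it during
   blocking opens and reads, so setting abort_request lets us interrupt slow
   network I/O */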
1877

    
1878
/* this thread gets the stream from the disk or the network */
1879
static int decode_thread(void *arg)
1880
{
1881
    VideoState *is = arg;
1882
    AVFormatContext *ic;
1883
    int err, i, ret, video_index, audio_index, use_play;
1884
    AVPacket pkt1, *pkt = &pkt1;
1885
    AVFormatParameters params, *ap = &params;
1886

    
1887
    video_index = -1;
1888
    audio_index = -1;
1889
    is->video_stream = -1;
1890
    is->audio_stream = -1;
1891
    is->subtitle_stream = -1;
1892

    
1893
    global_video_state = is;
1894
    url_set_interrupt_cb(decode_interrupt_cb);
1895

    
1896
    memset(ap, 0, sizeof(*ap));
1897
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
1898
                              stream */
1899

    
1900
    ap->width = frame_width;
1901
    ap->height= frame_height;
1902
    ap->time_base= (AVRational){1, 25};
1903
    ap->pix_fmt = frame_pix_fmt;
1904

    
1905
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1906
    if (err < 0) {
1907
        print_error(is->filename, err);
1908
        ret = -1;
1909
        goto fail;
1910
    }
1911
    is->ic = ic;
1912
#ifdef CONFIG_RTSP_DEMUXER
1913
    use_play = (ic->iformat == &rtsp_demuxer);
1914
#else
1915
    use_play = 0;
1916
#endif
1917

    
1918
    if(genpts)
1919
        ic->flags |= AVFMT_FLAG_GENPTS;
1920

    
1921
    if (!use_play) {
1922
        err = av_find_stream_info(ic);
1923
        if (err < 0) {
1924
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1925
            ret = -1;
1926
            goto fail;
1927
        }
1928
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1929
    }
1930

    
1931
    /* if seeking was requested, execute it now */
1932
    if (start_time != AV_NOPTS_VALUE) {
1933
        int64_t timestamp;
1934

    
1935
        timestamp = start_time;
1936
        /* add the stream start time */
1937
        if (ic->start_time != AV_NOPTS_VALUE)
1938
            timestamp += ic->start_time;
1939
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1940
        if (ret < 0) {
1941
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
1942
                    is->filename, (double)timestamp / AV_TIME_BASE);
1943
        }
1944
    }
1945

    
1946
    /* now we can begin to play (RTSP stream only) */
1947
    av_read_play(ic);
1948

    
1949
    if (use_play) {
1950
        err = av_find_stream_info(ic);
1951
        if (err < 0) {
1952
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1953
            ret = -1;
1954
            goto fail;
1955
        }
1956
    }
1957

    
1958
    for(i = 0; i < ic->nb_streams; i++) {
1959
        AVCodecContext *enc = ic->streams[i]->codec;
1960
        switch(enc->codec_type) {
1961
        case CODEC_TYPE_AUDIO:
1962
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1963
                audio_index = i;
1964
            break;
1965
        case CODEC_TYPE_VIDEO:
1966
            if (video_index < 0 && !video_disable)
1967
                video_index = i;
1968
            break;
1969
        default:
1970
            break;
1971
        }
1972
    }
1973
    if (show_status) {
1974
        dump_format(ic, 0, is->filename, 0);
1975
        dump_stream_info(ic);
1976
    }
1977

    
1978
    /* open the streams */
1979
    if (audio_index >= 0) {
1980
        stream_component_open(is, audio_index);
1981
    }
1982

    
1983
    if (video_index >= 0) {
1984
        stream_component_open(is, video_index);
1985
    } else {
1986
        if (!display_disable)
1987
            is->show_audio = 1;
1988
    }
1989

    
1990
    if (is->video_stream < 0 && is->audio_stream < 0) {
1991
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
1992
        ret = -1;
1993
        goto fail;
1994
    }
1995

    
1996
    for(;;) {
1997
        if (is->abort_request)
1998
            break;
1999
        if (is->paused != is->last_paused) {
2000
            is->last_paused = is->paused;
2001
            if (is->paused)
2002
                av_read_pause(ic);
2003
            else
2004
                av_read_play(ic);
2005
        }
2006
#ifdef CONFIG_RTSP_DEMUXER
2007
        if (is->paused && ic->iformat == &rtsp_demuxer) {
2008
            /* wait 10 ms to avoid trying to get another packet */
2009
            /* XXX: horrible */
2010
            SDL_Delay(10);
2011
            continue;
2012
        }
2013
#endif
2014
        if (is->seek_req) {
2015
            int stream_index= -1;
2016
            int64_t seek_target= is->seek_pos;
2017

    
2018
            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
2019
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
2020
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2021

    
2022
            if(stream_index>=0){
2023
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2024
            }
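            /* for example (illustrative): AV_TIME_BASE_Q is 1/1000000, so a
               5 second target (5000000) in a stream with a 1/90000 time base,
               as used by MPEG-TS, becomes 5 * 90000 = 450000 */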
2025

    
2026
            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2027
            if (ret < 0) {
2028
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2029
            }else{
2030
                if (is->audio_stream >= 0) {
2031
                    packet_queue_flush(&is->audioq);
2032
                    packet_queue_put(&is->audioq, &flush_pkt);
2033
                }
2034
                if (is->subtitle_stream >= 0) {
2035
                    packet_queue_flush(&is->subtitleq);
2036
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2037
                }
2038
                if (is->video_stream >= 0) {
2039
                    packet_queue_flush(&is->videoq);
2040
                    packet_queue_put(&is->videoq, &flush_pkt);
2041
                }
2042
            }
2043
            is->seek_req = 0;
2044
        }
2045

    
2046
        /* if the queues are full, no need to read more */
2047
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2048
            is->videoq.size > MAX_VIDEOQ_SIZE ||
2049
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2050
            url_feof(&ic->pb)) {
2051
            /* wait 10 ms */
2052
            SDL_Delay(10);
2053
            continue;
2054
        }
2055
        ret = av_read_frame(ic, pkt);
2056
        if (ret < 0) {
2057
            if (url_ferror(&ic->pb) == 0) {
2058
                SDL_Delay(100); /* wait for user event */
2059
                continue;
2060
            } else
2061
                break;
2062
        }
2063
        if (pkt->stream_index == is->audio_stream) {
2064
            packet_queue_put(&is->audioq, pkt);
2065
        } else if (pkt->stream_index == is->video_stream) {
2066
            packet_queue_put(&is->videoq, pkt);
2067
        } else if (pkt->stream_index == is->subtitle_stream) {
2068
            packet_queue_put(&is->subtitleq, pkt);
2069
        } else {
2070
            av_free_packet(pkt);
2071
        }
2072
    }
2073
    /* wait until the end */
2074
    while (!is->abort_request) {
2075
        SDL_Delay(100);
2076
    }
2077

    
2078
    ret = 0;
2079
 fail:
2080
    /* disable interrupting */
2081
    global_video_state = NULL;
2082

    
2083
    /* close each stream */
2084
    if (is->audio_stream >= 0)
2085
        stream_component_close(is, is->audio_stream);
2086
    if (is->video_stream >= 0)
2087
        stream_component_close(is, is->video_stream);
2088
    if (is->subtitle_stream >= 0)
2089
        stream_component_close(is, is->subtitle_stream);
2090
    if (is->ic) {
2091
        av_close_input_file(is->ic);
2092
        is->ic = NULL; /* safety */
2093
    }
2094
    url_set_interrupt_cb(NULL);
2095

    
2096
    if (ret != 0) {
2097
        SDL_Event event;
2098

    
2099
        event.type = FF_QUIT_EVENT;
2100
        event.user.data1 = is;
2101
        SDL_PushEvent(&event);
2102
    }
2103
    return 0;
2104
}
2105

    
2106
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2107
{
2108
    VideoState *is;
2109

    
2110
    is = av_mallocz(sizeof(VideoState));
2111
    if (!is)
2112
        return NULL;
2113
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2114
    is->iformat = iformat;
2115
    is->ytop = 0;
2116
    is->xleft = 0;
2117

    
2118
    /* start video display */
2119
    is->pictq_mutex = SDL_CreateMutex();
2120
    is->pictq_cond = SDL_CreateCond();
2121

    
2122
    is->subpq_mutex = SDL_CreateMutex();
2123
    is->subpq_cond = SDL_CreateCond();
2124

    
2125
    /* add the refresh timer to draw the picture */
2126
    schedule_refresh(is, 40);
2127

    
2128
    is->av_sync_type = av_sync_type;
2129
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2130
    if (!is->parse_tid) {
2131
        av_free(is);
2132
        return NULL;
2133
    }
2134
    return is;
2135
}
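
/* callers tear the returned VideoState down with stream_close(), which sets
   abort_request and joins the parse thread started here */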
2136

    
2137
static void stream_close(VideoState *is)
2138
{
2139
    VideoPicture *vp;
2140
    int i;
2141
    /* XXX: use a special url_shutdown call to abort parse cleanly */
2142
    is->abort_request = 1;
2143
    SDL_WaitThread(is->parse_tid, NULL);
2144

    
2145
    /* free all pictures */
2146
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2147
        vp = &is->pictq[i];
2148
        if (vp->bmp) {
2149
            SDL_FreeYUVOverlay(vp->bmp);
2150
            vp->bmp = NULL;
2151
        }
2152
    }
2153
    SDL_DestroyMutex(is->pictq_mutex);
2154
    SDL_DestroyCond(is->pictq_cond);
2155
    SDL_DestroyMutex(is->subpq_mutex);
2156
    SDL_DestroyCond(is->subpq_cond);
2157
}
2158

    
2159
static void stream_cycle_channel(VideoState *is, int codec_type)
2160
{
2161
    AVFormatContext *ic = is->ic;
2162
    int start_index, stream_index;
2163
    AVStream *st;
2164

    
2165
    if (codec_type == CODEC_TYPE_VIDEO)
2166
        start_index = is->video_stream;
2167
    else if (codec_type == CODEC_TYPE_AUDIO)
2168
        start_index = is->audio_stream;
2169
    else
2170
        start_index = is->subtitle_stream;
2171
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2172
        return;
2173
    stream_index = start_index;
2174
    for(;;) {
2175
        if (++stream_index >= is->ic->nb_streams)
2176
        {
2177
            if (codec_type == CODEC_TYPE_SUBTITLE)
2178
            {
2179
                stream_index = -1;
2180
                goto the_end;
2181
            } else
2182
                stream_index = 0;
2183
        }
2184
        if (stream_index == start_index)
2185
            return;
2186
        st = ic->streams[stream_index];
2187
        if (st->codec->codec_type == codec_type) {
2188
            /* check that parameters are OK */
2189
            switch(codec_type) {
2190
            case CODEC_TYPE_AUDIO:
2191
                if (st->codec->sample_rate != 0 &&
2192
                    st->codec->channels != 0)
2193
                    goto the_end;
2194
                break;
2195
            case CODEC_TYPE_VIDEO:
2196
            case CODEC_TYPE_SUBTITLE:
2197
                goto the_end;
2198
            default:
2199
                break;
2200
            }
2201
        }
2202
    }
2203
 the_end:
2204
    stream_component_close(is, start_index);
2205
    stream_component_open(is, stream_index);
2206
}
2207

    
2208

    
2209
static void toggle_full_screen(void)
2210
{
2211
    is_full_screen = !is_full_screen;
2212
    if (!fs_screen_width) {
2213
        /* use default SDL method */
2214
//        SDL_WM_ToggleFullScreen(screen);
2215
    }
2216
    video_open(cur_stream);
2217
}
2218

    
2219
static void toggle_pause(void)
2220
{
2221
    if (cur_stream)
2222
        stream_pause(cur_stream);
2223
    step = 0;
2224
}
2225

    
2226
static void step_to_next_frame(void)
2227
{
2228
    if (cur_stream) {
2229
        if (cur_stream->paused)
2230
            cur_stream->paused=0;
2231
        cur_stream->video_current_pts = get_video_clock(cur_stream);
2232
    }
2233
    step = 1;
2234
}
2235

    
2236
static void do_exit(void)
2237
{
2238
    if (cur_stream) {
2239
        stream_close(cur_stream);
2240
        cur_stream = NULL;
2241
    }
2242
    if (show_status)
2243
        printf("\n");
2244
    SDL_Quit();
2245
    exit(0);
2246
}
2247

    
2248
static void toggle_audio_display(void)
2249
{
2250
    if (cur_stream) {
2251
        cur_stream->show_audio = !cur_stream->show_audio;
2252
    }
2253
}
2254

    
2255
/* handle an event sent by the GUI */
2256
static void event_loop(void)
2257
{
2258
    SDL_Event event;
2259
    double incr, pos, frac;
2260

    
2261
    for(;;) {
2262
        SDL_WaitEvent(&event);
2263
        switch(event.type) {
2264
        case SDL_KEYDOWN:
2265
            switch(event.key.keysym.sym) {
2266
            case SDLK_ESCAPE:
2267
            case SDLK_q:
2268
                do_exit();
2269
                break;
2270
            case SDLK_f:
2271
                toggle_full_screen();
2272
                break;
2273
            case SDLK_p:
2274
            case SDLK_SPACE:
2275
                toggle_pause();
2276
                break;
2277
            case SDLK_s: //S: Step to next frame
2278
                step_to_next_frame();
2279
                break;
2280
            case SDLK_a:
2281
                if (cur_stream)
2282
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2283
                break;
2284
            case SDLK_v:
2285
                if (cur_stream)
2286
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2287
                break;
2288
            case SDLK_t:
2289
                if (cur_stream)
2290
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2291
                break;
2292
            case SDLK_w:
2293
                toggle_audio_display();
2294
                break;
2295
            case SDLK_LEFT:
2296
                incr = -10.0;
2297
                goto do_seek;
2298
            case SDLK_RIGHT:
2299
                incr = 10.0;
2300
                goto do_seek;
2301
            case SDLK_UP:
2302
                incr = 60.0;
2303
                goto do_seek;
2304
            case SDLK_DOWN:
2305
                incr = -60.0;
2306
            do_seek:
2307
                if (cur_stream) {
2308
                    if (seek_by_bytes) {
2309
                        pos = url_ftell(&cur_stream->ic->pb);
2310
                        if (cur_stream->ic->bit_rate)
2311
                            incr *= cur_stream->ic->bit_rate / 60.0;
2312
                        else
2313
                            incr *= 180000.0;
2314
                        pos += incr;
2315
                        stream_seek(cur_stream, pos, incr);
2316
                    } else {
2317
                        pos = get_master_clock(cur_stream);
2318
                        pos += incr;
2319
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2320
                    }
2321
                }
2322
                break;
2323
            default:
2324
                break;
2325
            }
2326
            break;
2327
        case SDL_MOUSEBUTTONDOWN:
2328
            if (cur_stream) {
2329
                int ns, hh, mm, ss;
2330
                int tns, thh, tmm, tss;
2331
                tns = cur_stream->ic->duration/1000000LL;
2332
                thh = tns/3600;
2333
                tmm = (tns%3600)/60;
2334
                tss = (tns%60);
2335
                frac = (double)event.button.x/(double)cur_stream->width;
2336
                ns = frac*tns;
2337
                hh = ns/3600;
2338
                mm = (ns%3600)/60;
2339
                ss = (ns%60);
2340
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2341
                        hh, mm, ss, thh, tmm, tss);
2342
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2343
            }
2344
            break;
2345
        case SDL_VIDEORESIZE:
2346
            if (cur_stream) {
2347
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2348
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2349
                screen_width = cur_stream->width = event.resize.w;
2350
                screen_height= cur_stream->height= event.resize.h;
2351
            }
2352
            break;
2353
        case SDL_QUIT:
2354
        case FF_QUIT_EVENT:
2355
            do_exit();
2356
            break;
2357
        case FF_ALLOC_EVENT:
2358
            video_open(event.user.data1);
2359
            alloc_picture(event.user.data1);
2360
            break;
2361
        case FF_REFRESH_EVENT:
2362
            video_refresh_timer(event.user.data1);
2363
            break;
2364
        default:
2365
            break;
2366
        }
2367
    }
2368
}
2369

    
2370
static void opt_frame_size(const char *arg)
2371
{
2372
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2373
        fprintf(stderr, "Incorrect frame size\n");
2374
        exit(1);
2375
    }
2376
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2377
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2378
        exit(1);
2379
    }
2380
}
2381

    
2382
static void opt_width(const char *arg)
2383
{
2384
    screen_width = atoi(arg);
2385
    if(screen_width<=0){
2386
        fprintf(stderr, "invalid width\n");
2387
        exit(1);
2388
    }
2389
}
2390

    
2391
static void opt_height(const char *arg)
2392
{
2393
    screen_height = atoi(arg);
2394
    if(screen_height<=0){
2395
        fprintf(stderr, "invalid height\n");
2396
        exit(1);
2397
    }
2398
}
2399

    
2400
static void opt_format(const char *arg)
2401
{
2402
    file_iformat = av_find_input_format(arg);
2403
    if (!file_iformat) {
2404
        fprintf(stderr, "Unknown input format: %s\n", arg);
2405
        exit(1);
2406
    }
2407
}
2408

    
2409
static void opt_frame_pix_fmt(const char *arg)
2410
{
2411
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
2412
}
2413

    
2414
#ifdef CONFIG_RTSP_DEMUXER
2415
static void opt_rtp_tcp(void)
2416
{
2417
    /* only tcp protocol */
2418
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
2419
}
2420
#endif
2421

    
2422
static void opt_sync(const char *arg)
2423
{
2424
    if (!strcmp(arg, "audio"))
2425
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2426
    else if (!strcmp(arg, "video"))
2427
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2428
    else if (!strcmp(arg, "ext"))
2429
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2430
    else
2431
        show_help();
2432
}
2433

    
2434
static void opt_seek(const char *arg)
2435
{
2436
    start_time = parse_date(arg, 1);
2437
}
2438

    
2439
static void opt_debug(const char *arg)
2440
{
2441
    av_log_level = 99;
2442
    debug = atoi(arg);
2443
}
2444

    
2445
static void opt_vismv(const char *arg)
2446
{
2447
    debug_mv = atoi(arg);
2448
}
2449

    
2450
static void opt_thread_count(const char *arg)
2451
{
2452
    thread_count= atoi(arg);
2453
#if !defined(HAVE_THREADS)
2454
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2455
#endif
2456
}
2457

    
2458
const OptionDef options[] = {
2459
    { "h", 0, {(void*)show_help}, "show help" },
2460
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2461
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2462
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2463
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2464
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2465
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2466
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2467
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2468
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2469
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2470
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2471
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2472
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2473
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2474
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2475
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2476
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2477
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2478
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2479
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2480
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2481
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2482
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2483
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2484
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
2485
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2486
#ifdef CONFIG_RTSP_DEMUXER
2487
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
2488
#endif
2489
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2490
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2491
    { NULL, },
2492
};
2493

    
2494
void show_help(void)
2495
{
2496
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2007 Fabrice Bellard, et al.\n"
2497
           "usage: ffplay [options] input_file\n"
2498
           "Simple media player\n");
2499
    printf("\n");
2500
    show_help_options(options, "Main options:\n",
2501
                      OPT_EXPERT, 0);
2502
    show_help_options(options, "\nAdvanced options:\n",
2503
                      OPT_EXPERT, OPT_EXPERT);
2504
    printf("\nWhile playing:\n"
2505
           "q, ESC              quit\n"
2506
           "f                   toggle full screen\n"
2507
           "p, SPC              pause\n"
2508
           "a                   cycle audio channel\n"
2509
           "v                   cycle video channel\n"
2510
           "t                   cycle subtitle channel\n"
2511
           "w                   show audio waves\n"
2512
           "left/right          seek backward/forward 10 seconds\n"
2513
           "down/up             seek backward/forward 1 minute\n"
2514
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2515
           );
2516
    exit(1);
2517
}
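
/* illustrative invocations (options from the table above):
 *   ffplay input.avi
 *   ffplay -ss 30 -an input.avi      start 30 seconds in, audio disabled
 *   ffplay -fs -sync ext input.avi   full screen, sync to the external clock
 */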
2518

    
2519
void parse_arg_file(const char *filename)
2520
{
2521
    if (!strcmp(filename, "-"))
2522
                    filename = "pipe:";
2523
    input_filename = filename;
2524
}
2525

    
2526
/* program entry point */
2527
int main(int argc, char **argv)
2528
{
2529
    int flags;
2530

    
2531
    /* register all codecs, demuxers and protocols */
2532
    av_register_all();
2533

    
2534
    #ifdef CONFIG_OS2
2535
      MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions
2536

    
2537
      // Make stdout and stderr unbuffered
2538
      setbuf( stdout, NULL );
2539
      setbuf( stderr, NULL );
2540
    #endif
2541

    
2542
    parse_options(argc, argv, options);
2543

    
2544
    if (!input_filename)
2545
        show_help();
2546

    
2547
    if (display_disable) {
2548
        video_disable = 1;
2549
    }
2550
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2551
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
2552
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
2553
#endif
2554
    if (SDL_Init (flags)) {
2555
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2556
        exit(1);
2557
    }
2558

    
2559
    if (!display_disable) {
2560
#ifdef HAVE_SDL_VIDEO_SIZE
2561
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2562
        fs_screen_width = vi->current_w;
2563
        fs_screen_height = vi->current_h;
2564
#endif
2565
    }
2566

    
2567
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2568
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2569
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2570
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2571

    
2572
    av_init_packet(&flush_pkt);
2573
    flush_pkt.data= "FLUSH";
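    /* flush_pkt is a sentinel: after a seek it is queued to every open stream
       and the decoders recognise its data pointer and call
       avcodec_flush_buffers() instead of decoding it (see audio_decode_frame()
       above) */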
2574

    
2575
    cur_stream = stream_open(input_filename, file_iformat);
2576

    
2577
    event_loop();
2578

    
2579
    /* never returns */
2580

    
2581
    return 0;
2582
}