ffmpeg / ffplay.c @ 3f3fe38d
/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavformat/rtsp.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync, as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)
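
/* Example, assuming 44100 Hz stereo S16 audio: one second is 44100 * 2 * 2 =
   176400 bytes, so a 4096-byte chunk covers ~23 ms and SAMPLE_CORRECTION_PERCENT_MAX
   lets synchronize_audio() grow or shrink such a chunk by at most ~409 bytes (~2.3 ms)
   per call.  AV_SYNC_THRESHOLD (10 ms) is the dead zone used when timing video
   frames, and an A-V error above AV_NOSYNC_THRESHOLD (10 s) is treated as a PTS
   discontinuity rather than corrected. */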

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;
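
/* One producer, several consumers: the demux thread fills a PacketQueue per stream
   with packet_queue_put(), while the audio, video and subtitle decoders drain their
   queue with packet_queue_get() in blocking mode.  'size' tracks the queued payload
   in bytes so queue growth can be bounded against the MAX_*Q_SIZE limits above. */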

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;
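
/* Rough thread layout: parse_tid demuxes packets into audioq/videoq/subtitleq,
   video_tid and subtitle_tid decode into pictq/subpq, the SDL audio callback
   consumes audioq, and the main thread owns the SDL surface, handles events and
   runs the refresh timer that displays pictures from pictq. */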

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;

    
227
/* packet queue handling */
228
static void packet_queue_init(PacketQueue *q)
229
{
230
    memset(q, 0, sizeof(PacketQueue));
231
    q->mutex = SDL_CreateMutex();
232
    q->cond = SDL_CreateCond();
233
}
234

    
235
static void packet_queue_flush(PacketQueue *q)
236
{
237
    AVPacketList *pkt, *pkt1;
238

    
239
    SDL_LockMutex(q->mutex);
240
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
241
        pkt1 = pkt->next;
242
        av_free_packet(&pkt->pkt);
243
        av_freep(&pkt);
244
    }
245
    q->last_pkt = NULL;
246
    q->first_pkt = NULL;
247
    q->nb_packets = 0;
248
    q->size = 0;
249
    SDL_UnlockMutex(q->mutex);
250
}
251

    
252
static void packet_queue_end(PacketQueue *q)
253
{
254
    packet_queue_flush(q);
255
    SDL_DestroyMutex(q->mutex);
256
    SDL_DestroyCond(q->cond);
257
}
258

    
259
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
260
{
261
    AVPacketList *pkt1;
262

    
263
    /* duplicate the packet */
264
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
265
        return -1;
266

    
267
    pkt1 = av_malloc(sizeof(AVPacketList));
268
    if (!pkt1)
269
        return -1;
270
    pkt1->pkt = *pkt;
271
    pkt1->next = NULL;
272

    
273

    
274
    SDL_LockMutex(q->mutex);
275

    
276
    if (!q->last_pkt)
277

    
278
        q->first_pkt = pkt1;
279
    else
280
        q->last_pkt->next = pkt1;
281
    q->last_pkt = pkt1;
282
    q->nb_packets++;
283
    q->size += pkt1->pkt.size;
284
    /* XXX: should duplicate packet data in DV case */
285
    SDL_CondSignal(q->cond);
286

    
287
    SDL_UnlockMutex(q->mutex);
288
    return 0;
289
}
290

    
291
static void packet_queue_abort(PacketQueue *q)
292
{
293
    SDL_LockMutex(q->mutex);
294

    
295
    q->abort_request = 1;
296

    
297
    SDL_CondSignal(q->cond);
298

    
299
    SDL_UnlockMutex(q->mutex);
300
}
301

    
302
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
303
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
304
{
305
    AVPacketList *pkt1;
306
    int ret;
307

    
308
    SDL_LockMutex(q->mutex);
309

    
310
    for(;;) {
311
        if (q->abort_request) {
312
            ret = -1;
313
            break;
314
        }
315

    
316
        pkt1 = q->first_pkt;
317
        if (pkt1) {
318
            q->first_pkt = pkt1->next;
319
            if (!q->first_pkt)
320
                q->last_pkt = NULL;
321
            q->nb_packets--;
322
            q->size -= pkt1->pkt.size;
323
            *pkt = pkt1->pkt;
324
            av_free(pkt1);
325
            ret = 1;
326
            break;
327
        } else if (!block) {
328
            ret = 0;
329
            break;
330
        } else {
331
            SDL_CondWait(q->cond, q->mutex);
332
        }
333
    }
334
    SDL_UnlockMutex(q->mutex);
335
    return ret;
336
}
337

    
338
static inline void fill_rectangle(SDL_Surface *screen,
339
                                  int x, int y, int w, int h, int color)
340
{
341
    SDL_Rect rect;
342
    rect.x = x;
343
    rect.y = y;
344
    rect.w = w;
345
    rect.h = h;
346
    SDL_FillRect(screen, &rect, color);
347
}
348

    
349
#if 0
350
/* draw only the border of a rectangle */
351
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
352
{
353
    int w1, w2, h1, h2;
354

355
    /* fill the background */
356
    w1 = x;
357
    if (w1 < 0)
358
        w1 = 0;
359
    w2 = s->width - (x + w);
360
    if (w2 < 0)
361
        w2 = 0;
362
    h1 = y;
363
    if (h1 < 0)
364
        h1 = 0;
365
    h2 = s->height - (y + h);
366
    if (h2 < 0)
367
        h2 = 0;
368
    fill_rectangle(screen,
369
                   s->xleft, s->ytop,
370
                   w1, s->height,
371
                   color);
372
    fill_rectangle(screen,
373
                   s->xleft + s->width - w2, s->ytop,
374
                   w2, s->height,
375
                   color);
376
    fill_rectangle(screen,
377
                   s->xleft + w1, s->ytop,
378
                   s->width - w1 - w2, h1,
379
                   color);
380
    fill_rectangle(screen,
381
                   s->xleft + w1, s->ytop + s->height - h2,
382
                   s->width - w1 - w2, h2,
383
                   color);
384
}
385
#endif
386

    
387

    
388

    
389
#define SCALEBITS 10
390
#define ONE_HALF  (1 << (SCALEBITS - 1))
391
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
392

    
393
#define RGB_TO_Y_CCIR(r, g, b) \
394
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
395
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
396

    
397
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
398
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
399
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
400

    
401
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
402
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
403
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
404

    
405
#define ALPHA_BLEND(a, oldp, newp, s)\
406
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
407

    
408
#define RGBA_IN(r, g, b, a, s)\
409
{\
410
    unsigned int v = ((const uint32_t *)(s))[0];\
411
    a = (v >> 24) & 0xff;\
412
    r = (v >> 16) & 0xff;\
413
    g = (v >> 8) & 0xff;\
414
    b = v & 0xff;\
415
}
416

    
417
#define YUVA_IN(y, u, v, a, s, pal)\
418
{\
419
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
420
    a = (val >> 24) & 0xff;\
421
    y = (val >> 16) & 0xff;\
422
    u = (val >> 8) & 0xff;\
423
    v = val & 0xff;\
424
}
425

    
426
#define YUVA_OUT(d, y, u, v, a)\
427
{\
428
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
429
}
430

    
431

    
432
#define BPP 1
433

    
434
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
435
{
436
    int wrap, wrap3, width2, skip2;
437
    int y, u, v, a, u1, v1, a1, w, h;
438
    uint8_t *lum, *cb, *cr;
439
    const uint8_t *p;
440
    const uint32_t *pal;
441
    int dstx, dsty, dstw, dsth;
442

    
443
    dstx = FFMIN(FFMAX(rect->x, 0), imgw);
444
    dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
445
    dsty = FFMIN(FFMAX(rect->y, 0), imgh);
446
    dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
447
    lum = dst->data[0] + dsty * dst->linesize[0];
448
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
449
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
450

    
451
    width2 = (dstw + 1) >> 1;
452
    skip2 = dstx >> 1;
453
    wrap = dst->linesize[0];
454
    wrap3 = rect->linesize;
455
    p = rect->bitmap;
456
    pal = rect->rgba_palette;  /* Now in YCrCb! */
457

    
458
    if (dsty & 1) {
459
        lum += dstx;
460
        cb += skip2;
461
        cr += skip2;
462

    
463
        if (dstx & 1) {
464
            YUVA_IN(y, u, v, a, p, pal);
465
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
466
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
467
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
468
            cb++;
469
            cr++;
470
            lum++;
471
            p += BPP;
472
        }
473
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
474
            YUVA_IN(y, u, v, a, p, pal);
475
            u1 = u;
476
            v1 = v;
477
            a1 = a;
478
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
479

    
480
            YUVA_IN(y, u, v, a, p + BPP, pal);
481
            u1 += u;
482
            v1 += v;
483
            a1 += a;
484
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
485
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
486
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
487
            cb++;
488
            cr++;
489
            p += 2 * BPP;
490
            lum += 2;
491
        }
492
        if (w) {
493
            YUVA_IN(y, u, v, a, p, pal);
494
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
495
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
496
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
497
        }
498
        p += wrap3 + (wrap3 - dstw * BPP);
499
        lum += wrap + (wrap - dstw - dstx);
500
        cb += dst->linesize[1] - width2 - skip2;
501
        cr += dst->linesize[2] - width2 - skip2;
502
    }
503
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
504
        lum += dstx;
505
        cb += skip2;
506
        cr += skip2;
507

    
508
        if (dstx & 1) {
509
            YUVA_IN(y, u, v, a, p, pal);
510
            u1 = u;
511
            v1 = v;
512
            a1 = a;
513
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
514
            p += wrap3;
515
            lum += wrap;
516
            YUVA_IN(y, u, v, a, p, pal);
517
            u1 += u;
518
            v1 += v;
519
            a1 += a;
520
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
521
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
522
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
523
            cb++;
524
            cr++;
525
            p += -wrap3 + BPP;
526
            lum += -wrap + 1;
527
        }
528
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
529
            YUVA_IN(y, u, v, a, p, pal);
530
            u1 = u;
531
            v1 = v;
532
            a1 = a;
533
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
534

    
535
            YUVA_IN(y, u, v, a, p, pal);
536
            u1 += u;
537
            v1 += v;
538
            a1 += a;
539
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
540
            p += wrap3;
541
            lum += wrap;
542

    
543
            YUVA_IN(y, u, v, a, p, pal);
544
            u1 += u;
545
            v1 += v;
546
            a1 += a;
547
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
548

    
549
            YUVA_IN(y, u, v, a, p, pal);
550
            u1 += u;
551
            v1 += v;
552
            a1 += a;
553
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
554

    
555
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
556
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
557

    
558
            cb++;
559
            cr++;
560
            p += -wrap3 + 2 * BPP;
561
            lum += -wrap + 2;
562
        }
563
        if (w) {
564
            YUVA_IN(y, u, v, a, p, pal);
565
            u1 = u;
566
            v1 = v;
567
            a1 = a;
568
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
569
            p += wrap3;
570
            lum += wrap;
571
            YUVA_IN(y, u, v, a, p, pal);
572
            u1 += u;
573
            v1 += v;
574
            a1 += a;
575
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
576
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
577
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
578
            cb++;
579
            cr++;
580
            p += -wrap3 + BPP;
581
            lum += -wrap + 1;
582
        }
583
        p += wrap3 + (wrap3 - dstw * BPP);
584
        lum += wrap + (wrap - dstw - dstx);
585
        cb += dst->linesize[1] - width2 - skip2;
586
        cr += dst->linesize[2] - width2 - skip2;
587
    }
588
    /* handle odd height */
589
    if (h) {
590
        lum += dstx;
591
        cb += skip2;
592
        cr += skip2;
593

    
594
        if (dstx & 1) {
595
            YUVA_IN(y, u, v, a, p, pal);
596
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
597
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
598
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
599
            cb++;
600
            cr++;
601
            lum++;
602
            p += BPP;
603
        }
604
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
605
            YUVA_IN(y, u, v, a, p, pal);
606
            u1 = u;
607
            v1 = v;
608
            a1 = a;
609
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
610

    
611
            YUVA_IN(y, u, v, a, p + BPP, pal);
612
            u1 += u;
613
            v1 += v;
614
            a1 += a;
615
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
616
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
617
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
618
            cb++;
619
            cr++;
620
            p += 2 * BPP;
621
            lum += 2;
622
        }
623
        if (w) {
624
            YUVA_IN(y, u, v, a, p, pal);
625
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
626
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
627
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628
        }
629
    }
630
}
631

    
632
static void free_subpicture(SubPicture *sp)
633
{
634
    int i;
635

    
636
    for (i = 0; i < sp->sub.num_rects; i++)
637
    {
638
        av_free(sp->sub.rects[i].bitmap);
639
        av_free(sp->sub.rects[i].rgba_palette);
640
    }
641

    
642
    av_free(sp->sub.rects);
643

    
644
    memset(&sp->sub, 0, sizeof(AVSubtitle));
645
}
646

    
647
static void video_image_display(VideoState *is)
648
{
649
    VideoPicture *vp;
650
    SubPicture *sp;
651
    AVPicture pict;
652
    float aspect_ratio;
653
    int width, height, x, y;
654
    SDL_Rect rect;
655
    int i;
656

    
657
    vp = &is->pictq[is->pictq_rindex];
658
    if (vp->bmp) {
659
        /* XXX: use variable in the frame */
660
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
661
            aspect_ratio = 0;
662
        else
663
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
664
                * is->video_st->codec->width / is->video_st->codec->height;
665
        if (aspect_ratio <= 0.0)
666
            aspect_ratio = (float)is->video_st->codec->width /
667
                (float)is->video_st->codec->height;
668
        /* if an active format is indicated, then it overrides the
669
           mpeg format */
670
#if 0
671
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
672
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
673
            printf("dtg_active_format=%d\n", is->dtg_active_format);
674
        }
675
#endif
676
#if 0
677
        switch(is->video_st->codec->dtg_active_format) {
678
        case FF_DTG_AFD_SAME:
679
        default:
680
            /* nothing to do */
681
            break;
682
        case FF_DTG_AFD_4_3:
683
            aspect_ratio = 4.0 / 3.0;
684
            break;
685
        case FF_DTG_AFD_16_9:
686
            aspect_ratio = 16.0 / 9.0;
687
            break;
688
        case FF_DTG_AFD_14_9:
689
            aspect_ratio = 14.0 / 9.0;
690
            break;
691
        case FF_DTG_AFD_4_3_SP_14_9:
692
            aspect_ratio = 14.0 / 9.0;
693
            break;
694
        case FF_DTG_AFD_16_9_SP_14_9:
695
            aspect_ratio = 14.0 / 9.0;
696
            break;
697
        case FF_DTG_AFD_SP_4_3:
698
            aspect_ratio = 4.0 / 3.0;
699
            break;
700
        }
701
#endif
702

    
703
        if (is->subtitle_st)
704
        {
705
            if (is->subpq_size > 0)
706
            {
707
                sp = &is->subpq[is->subpq_rindex];
708

    
709
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
710
                {
711
                    SDL_LockYUVOverlay (vp->bmp);
712

    
713
                    pict.data[0] = vp->bmp->pixels[0];
714
                    pict.data[1] = vp->bmp->pixels[2];
715
                    pict.data[2] = vp->bmp->pixels[1];
716

    
717
                    pict.linesize[0] = vp->bmp->pitches[0];
718
                    pict.linesize[1] = vp->bmp->pitches[2];
719
                    pict.linesize[2] = vp->bmp->pitches[1];
720

    
721
                    for (i = 0; i < sp->sub.num_rects; i++)
722
                        blend_subrect(&pict, &sp->sub.rects[i],
723
                                      vp->bmp->w, vp->bmp->h);
724

    
725
                    SDL_UnlockYUVOverlay (vp->bmp);
726
                }
727
            }
728
        }
729

    
730

    
731
        /* XXX: we suppose the screen has a 1.0 pixel ratio */
732
        height = is->height;
733
        width = ((int)rint(height * aspect_ratio)) & -3;
734
        if (width > is->width) {
735
            width = is->width;
736
            height = ((int)rint(width / aspect_ratio)) & -3;
737
        }
738
        x = (is->width - width) / 2;
739
        y = (is->height - height) / 2;
740
        if (!is->no_background) {
741
            /* fill the background */
742
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
743
        } else {
744
            is->no_background = 0;
745
        }
746
        rect.x = is->xleft + x;
747
        rect.y = is->ytop  + y;
748
        rect.w = width;
749
        rect.h = height;
750
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
751
    } else {
752
#if 0
753
        fill_rectangle(screen,
754
                       is->xleft, is->ytop, is->width, is->height,
755
                       QERGB(0x00, 0x00, 0x00));
756
#endif
757
    }
758
}
759

    
760
static inline int compute_mod(int a, int b)
761
{
762
    a = a % b;
763
    if (a >= 0)
764
        return a;
765
    else
766
        return a + b;
767
}
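
/* e.g. compute_mod(-3, SAMPLE_ARRAY_SIZE) returns SAMPLE_ARRAY_SIZE - 3: a modulo
   that never goes negative, which the circular sample_array indexing in
   video_audio_display() relies on. */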
768

    
769
static void video_audio_display(VideoState *s)
770
{
771
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
772
    int ch, channels, h, h2, bgcolor, fgcolor;
773
    int64_t time_diff;
774

    
775
    /* compute display index : center on currently output samples */
776
    channels = s->audio_st->codec->channels;
777
    nb_display_channels = channels;
778
    if (!s->paused) {
779
        n = 2 * channels;
780
        delay = audio_write_get_buf_size(s);
781
        delay /= n;
782

    
783
        /* to be more precise, we take into account the time spent since
784
           the last buffer computation */
785
        if (audio_callback_time) {
786
            time_diff = av_gettime() - audio_callback_time;
787
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
788
        }
789

    
790
        delay -= s->width / 2;
791
        if (delay < s->width)
792
            delay = s->width;
793

    
794
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
795

    
796
        h= INT_MIN;
797
        for(i=0; i<1000; i+=channels){
798
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
799
            int a= s->sample_array[idx];
800
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
801
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
802
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
803
            int score= a-d;
804
            if(h<score && (b^c)<0){
805
                h= score;
806
                i_start= idx;
807
            }
808
        }
809

    
810
        s->last_i_start = i_start;
811
    } else {
812
        i_start = s->last_i_start;
813
    }
814

    
815
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
816
    fill_rectangle(screen,
817
                   s->xleft, s->ytop, s->width, s->height,
818
                   bgcolor);
819

    
820
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
821

    
822
    /* total height for one channel */
823
    h = s->height / nb_display_channels;
824
    /* graph height / 2 */
825
    h2 = (h * 9) / 20;
826
    for(ch = 0;ch < nb_display_channels; ch++) {
827
        i = i_start + ch;
828
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
829
        for(x = 0; x < s->width; x++) {
830
            y = (s->sample_array[i] * h2) >> 15;
831
            if (y < 0) {
832
                y = -y;
833
                ys = y1 - y;
834
            } else {
835
                ys = y1;
836
            }
837
            fill_rectangle(screen,
838
                           s->xleft + x, ys, 1, y,
839
                           fgcolor);
840
            i += channels;
841
            if (i >= SAMPLE_ARRAY_SIZE)
842
                i -= SAMPLE_ARRAY_SIZE;
843
        }
844
    }
845

    
846
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
847

    
848
    for(ch = 1;ch < nb_display_channels; ch++) {
849
        y = s->ytop + ch * h;
850
        fill_rectangle(screen,
851
                       s->xleft, y, s->width, 1,
852
                       fgcolor);
853
    }
854
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
855
}
856

    
857
static int video_open(VideoState *is){
858
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
859
    int w,h;
860

    
861
    if(is_full_screen) flags |= SDL_FULLSCREEN;
862
    else               flags |= SDL_RESIZABLE;
863

    
864
    if (is_full_screen && fs_screen_width) {
865
        w = fs_screen_width;
866
        h = fs_screen_height;
867
    } else if(!is_full_screen && screen_width){
868
        w = screen_width;
869
        h = screen_height;
870
    }else if (is->video_st && is->video_st->codec->width){
871
        w = is->video_st->codec->width;
872
        h = is->video_st->codec->height;
873
    } else {
874
        w = 640;
875
        h = 480;
876
    }
877
#ifndef __APPLE__
878
    screen = SDL_SetVideoMode(w, h, 0, flags);
879
#else
880
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
881
    screen = SDL_SetVideoMode(w, h, 24, flags);
882
#endif
883
    if (!screen) {
884
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
885
        return -1;
886
    }
887
    SDL_WM_SetCaption("FFplay", "FFplay");
888

    
889
    is->width = screen->w;
890
    is->height = screen->h;
891

    
892
    return 0;
893
}
894

    
895
/* display the current picture, if any */
896
static void video_display(VideoState *is)
897
{
898
    if(!screen)
899
        video_open(cur_stream);
900
    if (is->audio_st && is->show_audio)
901
        video_audio_display(is);
902
    else if (is->video_st)
903
        video_image_display(is);
904
}
905

    
906
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
907
{
908
    SDL_Event event;
909
    event.type = FF_REFRESH_EVENT;
910
    event.user.data1 = opaque;
911
    SDL_PushEvent(&event);
912
    return 0; /* 0 means stop timer */
913
}
914

    
915
/* schedule a video refresh in 'delay' ms */
916
static void schedule_refresh(VideoState *is, int delay)
917
{
918
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
919
}
920

    
921
/* get the current audio clock value */
922
static double get_audio_clock(VideoState *is)
923
{
924
    double pts;
925
    int hw_buf_size, bytes_per_sec;
926
    pts = is->audio_clock;
927
    hw_buf_size = audio_write_get_buf_size(is);
928
    bytes_per_sec = 0;
929
    if (is->audio_st) {
930
        bytes_per_sec = is->audio_st->codec->sample_rate *
931
            2 * is->audio_st->codec->channels;
932
    }
933
    if (bytes_per_sec)
934
        pts -= (double)hw_buf_size / bytes_per_sec;
935
    return pts;
936
}
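
/* Worked example, assuming 44100 Hz stereo S16 audio: bytes_per_sec is
   44100 * 2 * 2 = 176400.  If audio_clock reports 10.000 s but 8820 bytes are still
   sitting in the output buffer, the sample currently being heard is
   8820 / 176400 = 0.05 s older, so the function returns 9.950 s. */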
937

    
938
/* get the current video clock value */
939
static double get_video_clock(VideoState *is)
940
{
941
    double delta;
942
    if (is->paused) {
943
        delta = 0;
944
    } else {
945
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
946
    }
947
    return is->video_current_pts + delta;
948
}
949

    
950
/* get the current external clock value */
951
static double get_external_clock(VideoState *is)
952
{
953
    int64_t ti;
954
    ti = av_gettime();
955
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
956
}
957

    
958
/* get the current master clock value */
959
static double get_master_clock(VideoState *is)
960
{
961
    double val;
962

    
963
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
964
        if (is->video_st)
965
            val = get_video_clock(is);
966
        else
967
            val = get_audio_clock(is);
968
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
969
        if (is->audio_st)
970
            val = get_audio_clock(is);
971
        else
972
            val = get_video_clock(is);
973
    } else {
974
        val = get_external_clock(is);
975
    }
976
    return val;
977
}
978

    
979
/* seek in the stream */
980
static void stream_seek(VideoState *is, int64_t pos, int rel)
981
{
982
    if (!is->seek_req) {
983
        is->seek_pos = pos;
984
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
985
        if (seek_by_bytes)
986
            is->seek_flags |= AVSEEK_FLAG_BYTE;
987
        is->seek_req = 1;
988
    }
989
}
990

    
991
/* pause or resume the video */
992
static void stream_pause(VideoState *is)
993
{
994
    is->paused = !is->paused;
995
    if (!is->paused) {
996
        is->video_current_pts = get_video_clock(is);
997
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
998
    }
999
}
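
/* Note: on resume, frame_timer is advanced by the time spent paused (measured from
   video_current_pts_time), so the next refresh is scheduled relative to "now"
   instead of trying to catch up on the whole pause. */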
1000

    
1001
/* called to display each frame */
1002
static void video_refresh_timer(void *opaque)
1003
{
1004
    VideoState *is = opaque;
1005
    VideoPicture *vp;
1006
    double actual_delay, delay, sync_threshold, ref_clock, diff;
1007

    
1008
    SubPicture *sp, *sp2;
1009

    
1010
    if (is->video_st) {
1011
        if (is->pictq_size == 0) {
1012
            /* if no picture, need to wait */
1013
            schedule_refresh(is, 1);
1014
        } else {
1015
            /* dequeue the picture */
1016
            vp = &is->pictq[is->pictq_rindex];
1017

    
1018
            /* update current video pts */
1019
            is->video_current_pts = vp->pts;
1020
            is->video_current_pts_time = av_gettime();
1021

    
1022
            /* compute nominal delay */
1023
            delay = vp->pts - is->frame_last_pts;
1024
            if (delay <= 0 || delay >= 2.0) {
1025
                /* if incorrect delay, use previous one */
1026
                delay = is->frame_last_delay;
1027
            }
1028
            is->frame_last_delay = delay;
1029
            is->frame_last_pts = vp->pts;
1030

    
1031
            /* update delay to follow master synchronisation source */
1032
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1033
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1034
                /* if video is slave, we try to correct big delays by
1035
                   duplicating or deleting a frame */
1036
                ref_clock = get_master_clock(is);
1037
                diff = vp->pts - ref_clock;
1038

    
1039
                /* skip or repeat frame. We take into account the
1040
                   delay to compute the threshold. I still don't know
1041
                   if it is the best guess */
1042
                sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1043
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1044
                    if (diff <= -sync_threshold)
1045
                        delay = 0;
1046
                    else if (diff >= sync_threshold)
1047
                        delay = 2 * delay;
1048
                }
1049
            }
1050

    
1051
            is->frame_timer += delay;
1052
            /* compute the REAL delay (we need to do that to avoid
               long-term errors) */
1054
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1055
            if (actual_delay < 0.010) {
1056
                /* XXX: should skip picture */
1057
                actual_delay = 0.010;
1058
            }
1059
            /* launch timer for next picture */
1060
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
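            /* Example, assuming 25 fps material (delay = 0.040 s): sync_threshold is
               max(AV_SYNC_THRESHOLD, 0.040) = 0.040 s.  If the frame's pts lags the
               master clock by more than that, delay becomes 0 and the frame is shown
               immediately; if it leads by more than that, delay doubles to 0.080 s so
               audio can catch up.  Errors beyond AV_NOSYNC_THRESHOLD are left alone. */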
1061

    
1062
#if defined(DEBUG_SYNC)
1063
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1064
                   delay, actual_delay, vp->pts, -diff);
1065
#endif
1066

    
1067
            if(is->subtitle_st) {
1068
                if (is->subtitle_stream_changed) {
1069
                    SDL_LockMutex(is->subpq_mutex);
1070

    
1071
                    while (is->subpq_size) {
1072
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1073

    
1074
                        /* update queue size and signal for next picture */
1075
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1076
                            is->subpq_rindex = 0;
1077

    
1078
                        is->subpq_size--;
1079
                    }
1080
                    is->subtitle_stream_changed = 0;
1081

    
1082
                    SDL_CondSignal(is->subpq_cond);
1083
                    SDL_UnlockMutex(is->subpq_mutex);
1084
                } else {
1085
                    if (is->subpq_size > 0) {
1086
                        sp = &is->subpq[is->subpq_rindex];
1087

    
1088
                        if (is->subpq_size > 1)
1089
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1090
                        else
1091
                            sp2 = NULL;
1092

    
1093
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1094
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1095
                        {
1096
                            free_subpicture(sp);
1097

    
1098
                            /* update queue size and signal for next picture */
1099
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1100
                                is->subpq_rindex = 0;
1101

    
1102
                            SDL_LockMutex(is->subpq_mutex);
1103
                            is->subpq_size--;
1104
                            SDL_CondSignal(is->subpq_cond);
1105
                            SDL_UnlockMutex(is->subpq_mutex);
1106
                        }
1107
                    }
1108
                }
1109
            }
1110

    
1111
            /* display picture */
1112
            video_display(is);
1113

    
1114
            /* update queue size and signal for next picture */
1115
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1116
                is->pictq_rindex = 0;
1117

    
1118
            SDL_LockMutex(is->pictq_mutex);
1119
            is->pictq_size--;
1120
            SDL_CondSignal(is->pictq_cond);
1121
            SDL_UnlockMutex(is->pictq_mutex);
1122
        }
1123
    } else if (is->audio_st) {
1124
        /* draw the next audio frame */
1125

    
1126
        schedule_refresh(is, 40);
1127

    
1128
        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */
1130

    
1131
        /* display picture */
1132
        video_display(is);
1133
    } else {
1134
        schedule_refresh(is, 100);
1135
    }
1136
    if (show_status) {
1137
        static int64_t last_time;
1138
        int64_t cur_time;
1139
        int aqsize, vqsize, sqsize;
1140
        double av_diff;
1141

    
1142
        cur_time = av_gettime();
1143
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1144
            aqsize = 0;
1145
            vqsize = 0;
1146
            sqsize = 0;
1147
            if (is->audio_st)
1148
                aqsize = is->audioq.size;
1149
            if (is->video_st)
1150
                vqsize = is->videoq.size;
1151
            if (is->subtitle_st)
1152
                sqsize = is->subtitleq.size;
1153
            av_diff = 0;
1154
            if (is->audio_st && is->video_st)
1155
                av_diff = get_audio_clock(is) - get_video_clock(is);
1156
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1157
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1158
            fflush(stdout);
1159
            last_time = cur_time;
1160
        }
1161
    }
1162
}
1163

    
1164
/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
1166
static void alloc_picture(void *opaque)
1167
{
1168
    VideoState *is = opaque;
1169
    VideoPicture *vp;
1170

    
1171
    vp = &is->pictq[is->pictq_windex];
1172

    
1173
    if (vp->bmp)
1174
        SDL_FreeYUVOverlay(vp->bmp);
1175

    
1176
#if 0
1177
    /* XXX: use generic function */
1178
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
1179
    switch(is->video_st->codec->pix_fmt) {
1180
    case PIX_FMT_YUV420P:
1181
    case PIX_FMT_YUV422P:
1182
    case PIX_FMT_YUV444P:
1183
    case PIX_FMT_YUYV422:
1184
    case PIX_FMT_YUV410P:
1185
    case PIX_FMT_YUV411P:
1186
        is_yuv = 1;
1187
        break;
1188
    default:
1189
        is_yuv = 0;
1190
        break;
1191
    }
1192
#endif
1193
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1194
                                   is->video_st->codec->height,
1195
                                   SDL_YV12_OVERLAY,
1196
                                   screen);
1197
    vp->width = is->video_st->codec->width;
1198
    vp->height = is->video_st->codec->height;
1199

    
1200
    SDL_LockMutex(is->pictq_mutex);
1201
    vp->allocated = 1;
1202
    SDL_CondSignal(is->pictq_cond);
1203
    SDL_UnlockMutex(is->pictq_mutex);
1204
}
1205

    
1206
/**
1207
 *
1208
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1209
 */
1210
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1211
{
1212
    VideoPicture *vp;
1213
    int dst_pix_fmt;
1214
    AVPicture pict;
1215
    static struct SwsContext *img_convert_ctx;
1216

    
1217
    /* wait until we have space to put a new picture */
1218
    SDL_LockMutex(is->pictq_mutex);
1219
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1220
           !is->videoq.abort_request) {
1221
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1222
    }
1223
    SDL_UnlockMutex(is->pictq_mutex);
1224

    
1225
    if (is->videoq.abort_request)
1226
        return -1;
1227

    
1228
    vp = &is->pictq[is->pictq_windex];
1229

    
1230
    /* alloc or resize hardware picture buffer */
1231
    if (!vp->bmp ||
1232
        vp->width != is->video_st->codec->width ||
1233
        vp->height != is->video_st->codec->height) {
1234
        SDL_Event event;
1235

    
1236
        vp->allocated = 0;
1237

    
1238
        /* the allocation must be done in the main thread to avoid
1239
           locking problems */
1240
        event.type = FF_ALLOC_EVENT;
1241
        event.user.data1 = is;
1242
        SDL_PushEvent(&event);
1243

    
1244
        /* wait until the picture is allocated */
1245
        SDL_LockMutex(is->pictq_mutex);
1246
        while (!vp->allocated && !is->videoq.abort_request) {
1247
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1248
        }
1249
        SDL_UnlockMutex(is->pictq_mutex);
1250

    
1251
        if (is->videoq.abort_request)
1252
            return -1;
1253
    }
1254

    
1255
    /* if the frame is not skipped, then display it */
1256
    if (vp->bmp) {
1257
        /* get a pointer on the bitmap */
1258
        SDL_LockYUVOverlay (vp->bmp);
1259

    
1260
        dst_pix_fmt = PIX_FMT_YUV420P;
1261
        pict.data[0] = vp->bmp->pixels[0];
1262
        pict.data[1] = vp->bmp->pixels[2];
1263
        pict.data[2] = vp->bmp->pixels[1];
1264

    
1265
        pict.linesize[0] = vp->bmp->pitches[0];
1266
        pict.linesize[1] = vp->bmp->pitches[2];
1267
        pict.linesize[2] = vp->bmp->pitches[1];
1268
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1269
            is->video_st->codec->width, is->video_st->codec->height,
1270
            is->video_st->codec->pix_fmt,
1271
            is->video_st->codec->width, is->video_st->codec->height,
1272
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1273
        if (img_convert_ctx == NULL) {
1274
            fprintf(stderr, "Cannot initialize the conversion context\n");
1275
            exit(1);
1276
        }
1277
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1278
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1279
        /* update the bitmap content */
1280
        SDL_UnlockYUVOverlay(vp->bmp);
1281

    
1282
        vp->pts = pts;
1283

    
1284
        /* now we can update the picture count */
1285
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1286
            is->pictq_windex = 0;
1287
        SDL_LockMutex(is->pictq_mutex);
1288
        is->pictq_size++;
1289
        SDL_UnlockMutex(is->pictq_mutex);
1290
    }
1291
    return 0;
1292
}
1293

    
1294
/**
1295
 * compute the exact PTS for the picture if it is omitted in the stream
1296
 * @param pts1 the dts of the pkt / pts of the frame
1297
 */
1298
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1299
{
1300
    double frame_delay, pts;
1301

    
1302
    pts = pts1;
1303

    
1304
    if (pts != 0) {
1305
        /* update video clock with pts, if present */
1306
        is->video_clock = pts;
1307
    } else {
1308
        pts = is->video_clock;
1309
    }
1310
    /* update video clock for next frame */
1311
    frame_delay = av_q2d(is->video_st->codec->time_base);
1312
    /* for MPEG2, the frame can be repeated, so we update the
1313
       clock accordingly */
1314
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1315
    is->video_clock += frame_delay;
1316

    
1317
#if defined(DEBUG_SYNC) && 0
1318
    {
1319
        int ftype;
1320
        if (src_frame->pict_type == FF_B_TYPE)
1321
            ftype = 'B';
1322
        else if (src_frame->pict_type == FF_I_TYPE)
1323
            ftype = 'I';
1324
        else
1325
            ftype = 'P';
1326
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1327
               ftype, pts, pts1);
1328
    }
1329
#endif
1330
    return queue_picture(is, src_frame, pts);
1331
}
1332

    
1333
static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1334

    
1335
static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1336
    int ret= avcodec_default_get_buffer(c, pic);
1337
    uint64_t *pts= av_malloc(sizeof(uint64_t));
1338
    *pts= global_video_pkt_pts;
1339
    pic->opaque= pts;
1340
    return ret;
1341
}
1342

    
1343
static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1344
    if(pic) av_freep(&pic->opaque);
1345
    avcodec_default_release_buffer(c, pic);
1346
}
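
/* The get/release_buffer overrides above stash the pts of the packet being fed to
   the decoder (global_video_pkt_pts) in frame->opaque, so video_thread() can
   recover a presentation timestamp for reordered frames whose packet dts is
   unreliable. */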
1347

    
1348
static int video_thread(void *arg)
1349
{
1350
    VideoState *is = arg;
1351
    AVPacket pkt1, *pkt = &pkt1;
1352
    int len1, got_picture;
1353
    AVFrame *frame= avcodec_alloc_frame();
1354
    double pts;
1355

    
1356
    for(;;) {
1357
        while (is->paused && !is->videoq.abort_request) {
1358
            SDL_Delay(10);
1359
        }
1360
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1361
            break;
1362

    
1363
        if(pkt->data == flush_pkt.data){
1364
            avcodec_flush_buffers(is->video_st->codec);
1365
            continue;
1366
        }
1367

    
1368
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1369
           this packet, if any */
1370
        global_video_pkt_pts= pkt->pts;
1371
        len1 = avcodec_decode_video(is->video_st->codec,
1372
                                    frame, &got_picture,
1373
                                    pkt->data, pkt->size);
1374

    
1375
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1376
           && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1377
            pts= *(uint64_t*)frame->opaque;
1378
        else if(pkt->dts != AV_NOPTS_VALUE)
1379
            pts= pkt->dts;
1380
        else
1381
            pts= 0;
1382
        pts *= av_q2d(is->video_st->time_base);
1383

    
1384
//            if (len1 < 0)
1385
//                break;
1386
        if (got_picture) {
1387
            if (output_picture2(is, frame, pts) < 0)
1388
                goto the_end;
1389
        }
1390
        av_free_packet(pkt);
1391
        if (step)
1392
            if (cur_stream)
1393
                stream_pause(cur_stream);
1394
    }
1395
 the_end:
1396
    av_free(frame);
1397
    return 0;
1398
}
1399

    
1400
static int subtitle_thread(void *arg)
1401
{
1402
    VideoState *is = arg;
1403
    SubPicture *sp;
1404
    AVPacket pkt1, *pkt = &pkt1;
1405
    int len1, got_subtitle;
1406
    double pts;
1407
    int i, j;
1408
    int r, g, b, y, u, v, a;
1409

    
1410
    for(;;) {
1411
        while (is->paused && !is->subtitleq.abort_request) {
1412
            SDL_Delay(10);
1413
        }
1414
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1415
            break;
1416

    
1417
        if(pkt->data == flush_pkt.data){
1418
            avcodec_flush_buffers(is->subtitle_st->codec);
1419
            continue;
1420
        }
1421
        SDL_LockMutex(is->subpq_mutex);
1422
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1423
               !is->subtitleq.abort_request) {
1424
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1425
        }
1426
        SDL_UnlockMutex(is->subpq_mutex);
1427

    
1428
        if (is->subtitleq.abort_request)
1429
            goto the_end;
1430

    
1431
        sp = &is->subpq[is->subpq_windex];
1432

    
1433
       /* NOTE: ipts is the PTS of the _first_ picture beginning in
1434
           this packet, if any */
1435
        pts = 0;
1436
        if (pkt->pts != AV_NOPTS_VALUE)
1437
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1438

    
1439
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1440
                                    &sp->sub, &got_subtitle,
1441
                                    pkt->data, pkt->size);
1442
//            if (len1 < 0)
1443
//                break;
1444
        if (got_subtitle && sp->sub.format == 0) {
1445
            sp->pts = pts;
1446

    
1447
            for (i = 0; i < sp->sub.num_rects; i++)
1448
            {
1449
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1450
                {
1451
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1452
                    y = RGB_TO_Y_CCIR(r, g, b);
1453
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1454
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1455
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1456
                }
1457
            }
1458

    
1459
            /* now we can update the picture count */
1460
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1461
                is->subpq_windex = 0;
1462
            SDL_LockMutex(is->subpq_mutex);
1463
            is->subpq_size++;
1464
            SDL_UnlockMutex(is->subpq_mutex);
1465
        }
1466
        av_free_packet(pkt);
1467
//        if (step)
1468
//            if (cur_stream)
1469
//                stream_pause(cur_stream);
1470
    }
1471
 the_end:
1472
    return 0;
1473
}
1474

    
1475
/* copy samples for viewing in editor window */
1476
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1477
{
1478
    int size, len, channels;
1479

    
1480
    channels = is->audio_st->codec->channels;
1481

    
1482
    size = samples_size / sizeof(short);
1483
    while (size > 0) {
1484
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1485
        if (len > size)
1486
            len = size;
1487
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1488
        samples += len;
1489
        is->sample_array_index += len;
1490
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1491
            is->sample_array_index = 0;
1492
        size -= len;
1493
    }
1494
}
1495

    
1496
/* return the new audio buffer size (samples can be added or removed
   to get better sync when video or the external clock is the master) */
1498
static int synchronize_audio(VideoState *is, short *samples,
1499
                             int samples_size1, double pts)
1500
{
1501
    int n, samples_size;
1502
    double ref_clock;
1503

    
1504
    n = 2 * is->audio_st->codec->channels;
1505
    samples_size = samples_size1;
1506

    
1507
    /* if not master, then we try to remove or add samples to correct the clock */
1508
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1509
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1510
        double diff, avg_diff;
1511
        int wanted_size, min_size, max_size, nb_samples;
1512

    
1513
        ref_clock = get_master_clock(is);
1514
        diff = get_audio_clock(is) - ref_clock;
1515

    
1516
        if (diff < AV_NOSYNC_THRESHOLD) {
1517
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1518
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1519
                /* not enough measurements to have a correct estimate */
1520
                is->audio_diff_avg_count++;
1521
            } else {
1522
                /* estimate the A-V difference */
1523
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1524

    
1525
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1526
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1527
                    nb_samples = samples_size / n;
1528

    
1529
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1530
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1531
                    if (wanted_size < min_size)
1532
                        wanted_size = min_size;
1533
                    else if (wanted_size > max_size)
1534
                        wanted_size = max_size;
1535

    
1536
                    /* add or remove samples to correct the sync */
1537
                    if (wanted_size < samples_size) {
1538
                        /* remove samples */
1539
                        samples_size = wanted_size;
1540
                    } else if (wanted_size > samples_size) {
1541
                        uint8_t *samples_end, *q;
1542
                        int nb;
1543

    
1544
                        /* add samples */
1545
                        nb = (samples_size - wanted_size);
1546
                        samples_end = (uint8_t *)samples + samples_size - n;
1547
                        q = samples_end + n;
1548
                        while (nb > 0) {
1549
                            memcpy(q, samples_end, n);
1550
                            q += n;
1551
                            nb -= n;
1552
                        }
1553
                        samples_size = wanted_size;
1554
                    }
1555
                }
1556
#if 0
1557
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1558
                       diff, avg_diff, samples_size - samples_size1,
1559
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1560
#endif
1561
            }
1562
        } else {
1563
            /* difference is too large: may be initial PTS errors, so
               reset the A-V filter */
1565
            is->audio_diff_avg_count = 0;
1566
            is->audio_diff_cum = 0;
1567
        }
1568
    }
1569

    
1570
    return samples_size;
1571
}
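
/* Worked example, assuming 44100 Hz stereo S16 (n = 4 bytes per sample frame): if
   the audio clock has drifted 20 ms behind the master clock (diff = -0.020),
   wanted_size shrinks by (int)(0.020 * 44100) * 4 = 3528 bytes, clamped so that at
   most SAMPLE_CORRECTION_PERCENT_MAX (10%) of the chunk is dropped in one call; a
   positive drift is meant to pad the chunk by repeating its last sample frame. */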
1572

    
1573
/* decode one audio frame and return its uncompressed size */
1574
static int audio_decode_frame(VideoState *is, double *pts_ptr)
1575
{
1576
    AVPacket *pkt = &is->audio_pkt;
1577
    AVCodecContext *dec= is->audio_st->codec;
1578
    int n, len1, data_size;
1579
    double pts;
1580

    
1581
    for(;;) {
1582
        /* NOTE: the audio packet can contain several frames */
1583
        while (is->audio_pkt_size > 0) {
1584
            data_size = sizeof(is->audio_buf1);
1585
            len1 = avcodec_decode_audio2(dec,
1586
                                        (int16_t *)is->audio_buf1, &data_size,
1587
                                        is->audio_pkt_data, is->audio_pkt_size);
1588
            if (len1 < 0) {
1589
                /* if error, we skip the frame */
1590
                is->audio_pkt_size = 0;
1591
                break;
1592
            }
1593

    
1594
            is->audio_pkt_data += len1;
1595
            is->audio_pkt_size -= len1;
1596
            if (data_size <= 0)
1597
                continue;
1598

    
1599
            if (dec->sample_fmt != is->audio_src_fmt) {
1600
                if (is->reformat_ctx)
1601
                    av_audio_convert_free(is->reformat_ctx);
1602
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1603
                                                         dec->sample_fmt, 1, NULL, 0);
1604
                if (!is->reformat_ctx) {
1605
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1606
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
1607
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1608
                    break;
1609
                }
1610
                is->audio_src_fmt= dec->sample_fmt;
1611
            }
1612

    
1613
            if (is->reformat_ctx) {
1614
                const void *ibuf[6]= {is->audio_buf1};
1615
                void *obuf[6]= {is->audio_buf2};
1616
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
1617
                int ostride[6]= {2};
1618
                int len= data_size/istride[0];
1619
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
1620
                    printf("av_audio_convert() failed\n");
1621
                    break;
1622
                }
1623
                is->audio_buf= is->audio_buf2;
1624
                /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
1625
                          remove this legacy cruft */
1626
                data_size= len*2;
1627
            }else{
1628
                is->audio_buf= is->audio_buf1;
1629
            }
1630

    
1631
            /* use the running audio clock as the pts of this frame */
1632
            pts = is->audio_clock;
1633
            *pts_ptr = pts;
1634
            n = 2 * dec->channels;
1635
            is->audio_clock += (double)data_size /
1636
                (double)(n * dec->sample_rate);
1637
#if defined(DEBUG_SYNC)
1638
            {
1639
                static double last_clock;
1640
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1641
                       is->audio_clock - last_clock,
1642
                       is->audio_clock, pts);
1643
                last_clock = is->audio_clock;
1644
            }
1645
#endif
1646
            return data_size;
1647
        }
1648

    
1649
        /* free the current packet */
1650
        if (pkt->data)
1651
            av_free_packet(pkt);
1652

    
1653
        if (is->paused || is->audioq.abort_request) {
1654
            return -1;
1655
        }
1656

    
1657
        /* read next packet */
1658
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1659
            return -1;
1660
        if(pkt->data == flush_pkt.data){
1661
            avcodec_flush_buffers(dec);
1662
            continue;
1663
        }
1664

    
1665
        is->audio_pkt_data = pkt->data;
1666
        is->audio_pkt_size = pkt->size;
1667

    
1668
        /* if the packet has a pts, update the audio clock with it */
1669
        if (pkt->pts != AV_NOPTS_VALUE) {
1670
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1671
        }
1672
    }
1673
}
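
/* Note on the audio clock update above: data_size is in bytes and
 * n = 2 * channels (2 bytes per sample after conversion to S16), so each
 * decoded chunk advances the clock by data_size / (n * sample_rate)
 * seconds.  For example (illustrative numbers), 4608 bytes of stereo S16
 * at 48000 Hz advance the clock by 4608 / (4 * 48000) = 0.024 s. */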
1674

    
1675
/* get the amount of data remaining in the audio output buffer, in bytes.
1676
   With SDL we cannot get precise hardware buffer fullness information */
1677
static int audio_write_get_buf_size(VideoState *is)
1678
{
1679
    return is->audio_buf_size - is->audio_buf_index;
1680
}
1681

    
1682

    
1683
/* prepare a new audio buffer */
1684
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1685
{
1686
    VideoState *is = opaque;
1687
    int audio_size, len1;
1688
    double pts;
1689

    
1690
    audio_callback_time = av_gettime();
1691

    
1692
    while (len > 0) {
1693
        if (is->audio_buf_index >= is->audio_buf_size) {
1694
           audio_size = audio_decode_frame(is, &pts);
1695
           if (audio_size < 0) {
1696
                /* if error, just output silence */
1697
               is->audio_buf_size = 1024;
1698
               memset(is->audio_buf, 0, is->audio_buf_size);
1699
           } else {
1700
               if (is->show_audio)
1701
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1702
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1703
                                              pts);
1704
               is->audio_buf_size = audio_size;
1705
           }
1706
           is->audio_buf_index = 0;
1707
        }
1708
        len1 = is->audio_buf_size - is->audio_buf_index;
1709
        if (len1 > len)
1710
            len1 = len;
1711
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1712
        len -= len1;
1713
        stream += len1;
1714
        is->audio_buf_index += len1;
1715
    }
1716
}
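
/* The callback above is invoked from SDL's internal audio thread once
 * SDL_OpenAudio() has been called with it (see stream_component_open()
 * below) and SDL_PauseAudio(0) has unpaused playback.  It is expected to
 * fill exactly 'len' bytes of 'stream'; on decode errors it writes silence
 * so the device keeps running and A/V sync is not disturbed further.
 * Note that audio_buf_size and audio_buf_index are byte counts. */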
1717

    
1718
/* open a given stream. Return 0 if OK */
1719
static int stream_component_open(VideoState *is, int stream_index)
1720
{
1721
    AVFormatContext *ic = is->ic;
1722
    AVCodecContext *enc;
1723
    AVCodec *codec;
1724
    SDL_AudioSpec wanted_spec, spec;
1725

    
1726
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1727
        return -1;
1728
    enc = ic->streams[stream_index]->codec;
1729

    
1730
    /* prepare audio output */
1731
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1732
        if (enc->channels > 0) {
1733
            enc->request_channels = FFMIN(2, enc->channels);
1734
        } else {
1735
            enc->request_channels = 2;
1736
        }
1737
    }
1738

    
1739
    codec = avcodec_find_decoder(enc->codec_id);
1740
    enc->debug_mv = debug_mv;
1741
    enc->debug = debug;
1742
    enc->workaround_bugs = workaround_bugs;
1743
    enc->lowres = lowres;
1744
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1745
    enc->idct_algo= idct;
1746
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1747
    enc->skip_frame= skip_frame;
1748
    enc->skip_idct= skip_idct;
1749
    enc->skip_loop_filter= skip_loop_filter;
1750
    enc->error_resilience= error_resilience;
1751
    enc->error_concealment= error_concealment;
1752
    if (!codec ||
1753
        avcodec_open(enc, codec) < 0)
1754
        return -1;
1755

    
1756
    /* prepare audio output */
1757
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
1758
        wanted_spec.freq = enc->sample_rate;
1759
        wanted_spec.format = AUDIO_S16SYS;
1760
        wanted_spec.channels = enc->channels;
1761
        wanted_spec.silence = 0;
1762
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1763
        wanted_spec.callback = sdl_audio_callback;
1764
        wanted_spec.userdata = is;
1765
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1766
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1767
            return -1;
1768
        }
1769
        is->audio_hw_buf_size = spec.size;
1770
        is->audio_src_fmt= SAMPLE_FMT_S16;
1771
    }
1772

    
1773
    if(thread_count>1)
1774
        avcodec_thread_init(enc, thread_count);
1775
    enc->thread_count= thread_count;
1776
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
1777
    switch(enc->codec_type) {
1778
    case CODEC_TYPE_AUDIO:
1779
        is->audio_stream = stream_index;
1780
        is->audio_st = ic->streams[stream_index];
1781
        is->audio_buf_size = 0;
1782
        is->audio_buf_index = 0;
1783

    
1784
        /* init averaging filter */
1785
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1786
        is->audio_diff_avg_count = 0;
1787
        /* since we do not have precise enough audio fifo fullness information,
1788
           we only correct audio sync when the error is larger than this threshold */
1789
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1790

    
1791
        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1792
        packet_queue_init(&is->audioq);
1793
        SDL_PauseAudio(0);
1794
        break;
1795
    case CODEC_TYPE_VIDEO:
1796
        is->video_stream = stream_index;
1797
        is->video_st = ic->streams[stream_index];
1798

    
1799
        is->frame_last_delay = 40e-3;
1800
        is->frame_timer = (double)av_gettime() / 1000000.0;
1801
        is->video_current_pts_time = av_gettime();
1802

    
1803
        packet_queue_init(&is->videoq);
1804
        is->video_tid = SDL_CreateThread(video_thread, is);
1805

    
1806
        enc->get_buffer = my_get_buffer;
1807
        enc->release_buffer = my_release_buffer;
1808
        break;
1809
    case CODEC_TYPE_SUBTITLE:
1810
        is->subtitle_stream = stream_index;
1811
        is->subtitle_st = ic->streams[stream_index];
1812
        packet_queue_init(&is->subtitleq);
1813

    
1814
        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1815
        break;
1816
    default:
1817
        break;
1818
    }
1819
    return 0;
1820
}
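
/* Note on the averaging filter initialised above: with
 * audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.794,
 * audio_diff_cum is an exponentially weighted sum in which a measurement
 * made AUDIO_DIFF_AVG_NB (20) updates ago retains roughly 1% of its weight;
 * synchronize_audio() scales that sum by (1 - coef) to obtain avg_diff.
 * The threshold 2.0 * SDL_AUDIO_BUFFER_SIZE / sample_rate corresponds to
 * about two SDL callback periods, i.e. the granularity below which SDL's
 * own buffering makes the measured A-V difference unreliable. */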
1821

    
1822
static void stream_component_close(VideoState *is, int stream_index)
1823
{
1824
    AVFormatContext *ic = is->ic;
1825
    AVCodecContext *enc;
1826

    
1827
    if (stream_index < 0 || stream_index >= ic->nb_streams)
1828
        return;
1829
    enc = ic->streams[stream_index]->codec;
1830

    
1831
    switch(enc->codec_type) {
1832
    case CODEC_TYPE_AUDIO:
1833
        packet_queue_abort(&is->audioq);
1834

    
1835
        SDL_CloseAudio();
1836

    
1837
        packet_queue_end(&is->audioq);
1838
        if (is->reformat_ctx)
1839
            av_audio_convert_free(is->reformat_ctx);
1840
        break;
1841
    case CODEC_TYPE_VIDEO:
1842
        packet_queue_abort(&is->videoq);
1843

    
1844
        /* note: we also signal this mutex to make sure we unblock the
1845
           video thread in all cases */
1846
        SDL_LockMutex(is->pictq_mutex);
1847
        SDL_CondSignal(is->pictq_cond);
1848
        SDL_UnlockMutex(is->pictq_mutex);
1849

    
1850
        SDL_WaitThread(is->video_tid, NULL);
1851

    
1852
        packet_queue_end(&is->videoq);
1853
        break;
1854
    case CODEC_TYPE_SUBTITLE:
1855
        packet_queue_abort(&is->subtitleq);
1856

    
1857
        /* note: we also signal this mutex to make sure we unblock the
1858
           subtitle thread in all cases */
1859
        SDL_LockMutex(is->subpq_mutex);
1860
        is->subtitle_stream_changed = 1;
1861

    
1862
        SDL_CondSignal(is->subpq_cond);
1863
        SDL_UnlockMutex(is->subpq_mutex);
1864

    
1865
        SDL_WaitThread(is->subtitle_tid, NULL);
1866

    
1867
        packet_queue_end(&is->subtitleq);
1868
        break;
1869
    default:
1870
        break;
1871
    }
1872

    
1873
    ic->streams[stream_index]->discard = AVDISCARD_ALL;
1874
    avcodec_close(enc);
1875
    switch(enc->codec_type) {
1876
    case CODEC_TYPE_AUDIO:
1877
        is->audio_st = NULL;
1878
        is->audio_stream = -1;
1879
        break;
1880
    case CODEC_TYPE_VIDEO:
1881
        is->video_st = NULL;
1882
        is->video_stream = -1;
1883
        break;
1884
    case CODEC_TYPE_SUBTITLE:
1885
        is->subtitle_st = NULL;
1886
        is->subtitle_stream = -1;
1887
        break;
1888
    default:
1889
        break;
1890
    }
1891
}
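
/* Teardown order used above: first abort the packet queue so any blocked
 * packet_queue_get() returns, then wake the decoder thread via its condition
 * variable (video/subtitle) or close the SDL audio device (audio), join the
 * thread with SDL_WaitThread(), and only then free the queue and close the
 * codec.  Freeing the queue or codec before the thread has exited could
 * leave it reading freed memory. */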
1892

    
1893
static void dump_stream_info(const AVFormatContext *s)
1894
{
1895
    if (s->track != 0)
1896
        fprintf(stderr, "Track: %d\n", s->track);
1897
    if (s->title[0] != '\0')
1898
        fprintf(stderr, "Title: %s\n", s->title);
1899
    if (s->author[0] != '\0')
1900
        fprintf(stderr, "Author: %s\n", s->author);
1901
    if (s->copyright[0] != '\0')
1902
        fprintf(stderr, "Copyright: %s\n", s->copyright);
1903
    if (s->comment[0] != '\0')
1904
        fprintf(stderr, "Comment: %s\n", s->comment);
1905
    if (s->album[0] != '\0')
1906
        fprintf(stderr, "Album: %s\n", s->album);
1907
    if (s->year != 0)
1908
        fprintf(stderr, "Year: %d\n", s->year);
1909
    if (s->genre[0] != '\0')
1910
        fprintf(stderr, "Genre: %s\n", s->genre);
1911
}
1912

    
1913
/* since we have only one decoding thread, we can use a global
1914
   variable instead of a thread local variable */
1915
static VideoState *global_video_state;
1916

    
1917
static int decode_interrupt_cb(void)
1918
{
1919
    return (global_video_state && global_video_state->abort_request);
1920
}
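
/* url_set_interrupt_cb() (called in decode_thread() below) registers this
 * callback with libavformat, which polls it during blocking network/disk
 * I/O; returning non-zero makes the pending operation fail early, so a
 * quit request is not stuck behind a stalled RTSP or HTTP read. */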
1921

    
1922
/* this thread gets the stream from the disk or the network */
1923
static int decode_thread(void *arg)
1924
{
1925
    VideoState *is = arg;
1926
    AVFormatContext *ic;
1927
    int err, i, ret, video_index, audio_index;
1928
    AVPacket pkt1, *pkt = &pkt1;
1929
    AVFormatParameters params, *ap = &params;
1930

    
1931
    video_index = -1;
1932
    audio_index = -1;
1933
    is->video_stream = -1;
1934
    is->audio_stream = -1;
1935
    is->subtitle_stream = -1;
1936

    
1937
    global_video_state = is;
1938
    url_set_interrupt_cb(decode_interrupt_cb);
1939

    
1940
    memset(ap, 0, sizeof(*ap));
1941

    
1942
    ap->width = frame_width;
1943
    ap->height= frame_height;
1944
    ap->time_base= (AVRational){1, 25};
1945
    ap->pix_fmt = frame_pix_fmt;
1946

    
1947
    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1948
    if (err < 0) {
1949
        print_error(is->filename, err);
1950
        ret = -1;
1951
        goto fail;
1952
    }
1953
    is->ic = ic;
1954

    
1955
    if(genpts)
1956
        ic->flags |= AVFMT_FLAG_GENPTS;
1957

    
1958
    err = av_find_stream_info(ic);
1959
    if (err < 0) {
1960
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1961
        ret = -1;
1962
        goto fail;
1963
    }
1964
    if(ic->pb)
1965
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1966

    
1967
    /* if a start position was requested, seek to it */
1968
    if (start_time != AV_NOPTS_VALUE) {
1969
        int64_t timestamp;
1970

    
1971
        timestamp = start_time;
1972
        /* add the stream start time */
1973
        if (ic->start_time != AV_NOPTS_VALUE)
1974
            timestamp += ic->start_time;
1975
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1976
        if (ret < 0) {
1977
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
1978
                    is->filename, (double)timestamp / AV_TIME_BASE);
1979
        }
1980
    }
1981

    
1982
    for(i = 0; i < ic->nb_streams; i++) {
1983
        AVCodecContext *enc = ic->streams[i]->codec;
1984
        ic->streams[i]->discard = AVDISCARD_ALL;
1985
        switch(enc->codec_type) {
1986
        case CODEC_TYPE_AUDIO:
1987
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1988
                audio_index = i;
1989
            break;
1990
        case CODEC_TYPE_VIDEO:
1991
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1992
                video_index = i;
1993
            break;
1994
        default:
1995
            break;
1996
        }
1997
    }
1998
    if (show_status) {
1999
        dump_format(ic, 0, is->filename, 0);
2000
        dump_stream_info(ic);
2001
    }
2002

    
2003
    /* open the streams */
2004
    if (audio_index >= 0) {
2005
        stream_component_open(is, audio_index);
2006
    }
2007

    
2008
    if (video_index >= 0) {
2009
        stream_component_open(is, video_index);
2010
    } else {
2011
        if (!display_disable)
2012
            is->show_audio = 1;
2013
    }
2014

    
2015
    if (is->video_stream < 0 && is->audio_stream < 0) {
2016
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
2017
        ret = -1;
2018
        goto fail;
2019
    }
2020

    
2021
    for(;;) {
2022
        if (is->abort_request)
2023
            break;
2024
        if (is->paused != is->last_paused) {
2025
            is->last_paused = is->paused;
2026
            if (is->paused)
2027
                av_read_pause(ic);
2028
            else
2029
                av_read_play(ic);
2030
        }
2031
#if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
2032
        if (is->paused &&
2033
                (!strcmp(ic->iformat->name, "rtsp") ||
2034
                 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
2035
            /* wait 10 ms to avoid trying to get another packet */
2036
            /* XXX: horrible */
2037
            SDL_Delay(10);
2038
            continue;
2039
        }
2040
#endif
2041
        if (is->seek_req) {
2042
            int stream_index= -1;
2043
            int64_t seek_target= is->seek_pos;
2044

    
2045
            if      (is->video_stream >= 0)    stream_index = is->video_stream;
2046
            else if (is->audio_stream >= 0)    stream_index = is->audio_stream;
2047
            else if (is->subtitle_stream >= 0) stream_index = is->subtitle_stream;
2048

    
2049
            if(stream_index>=0){
2050
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2051
            }
2052

    
2053
            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2054
            if (ret < 0) {
2055
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2056
            }else{
2057
                if (is->audio_stream >= 0) {
2058
                    packet_queue_flush(&is->audioq);
2059
                    packet_queue_put(&is->audioq, &flush_pkt);
2060
                }
2061
                if (is->subtitle_stream >= 0) {
2062
                    packet_queue_flush(&is->subtitleq);
2063
                    packet_queue_put(&is->subtitleq, &flush_pkt);
2064
                }
2065
                if (is->video_stream >= 0) {
2066
                    packet_queue_flush(&is->videoq);
2067
                    packet_queue_put(&is->videoq, &flush_pkt);
2068
                }
2069
            }
2070
            is->seek_req = 0;
2071
        }
2072

    
2073
        /* if the queues are full, no need to read more */
2074
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2075
            is->videoq.size > MAX_VIDEOQ_SIZE ||
2076
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2077
            url_feof(ic->pb)) {
2078
            /* wait 10 ms */
2079
            SDL_Delay(10);
2080
            continue;
2081
        }
2082
        ret = av_read_frame(ic, pkt);
2083
        if (ret < 0) {
2084
            if (url_ferror(ic->pb) == 0) {
2085
                SDL_Delay(100); /* wait for user event */
2086
                continue;
2087
            } else
2088
                break;
2089
        }
2090
        if (pkt->stream_index == is->audio_stream) {
2091
            packet_queue_put(&is->audioq, pkt);
2092
        } else if (pkt->stream_index == is->video_stream) {
2093
            packet_queue_put(&is->videoq, pkt);
2094
        } else if (pkt->stream_index == is->subtitle_stream) {
2095
            packet_queue_put(&is->subtitleq, pkt);
2096
        } else {
2097
            av_free_packet(pkt);
2098
        }
2099
    }
2100
    /* wait until the end */
2101
    while (!is->abort_request) {
2102
        SDL_Delay(100);
2103
    }
2104

    
2105
    ret = 0;
2106
 fail:
2107
    /* disable interrupting */
2108
    global_video_state = NULL;
2109

    
2110
    /* close each stream */
2111
    if (is->audio_stream >= 0)
2112
        stream_component_close(is, is->audio_stream);
2113
    if (is->video_stream >= 0)
2114
        stream_component_close(is, is->video_stream);
2115
    if (is->subtitle_stream >= 0)
2116
        stream_component_close(is, is->subtitle_stream);
2117
    if (is->ic) {
2118
        av_close_input_file(is->ic);
2119
        is->ic = NULL; /* safety */
2120
    }
2121
    url_set_interrupt_cb(NULL);
2122

    
2123
    if (ret != 0) {
2124
        SDL_Event event;
2125

    
2126
        event.type = FF_QUIT_EVENT;
2127
        event.user.data1 = is;
2128
        SDL_PushEvent(&event);
2129
    }
2130
    return 0;
2131
}
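
/* Seek protocol used above: after a successful av_seek_frame() each packet
 * queue is flushed and a copy of the special flush_pkt is queued.  The
 * decoder loops (see audio_decode_frame() and the video/subtitle threads)
 * recognise flush_pkt.data and call avcodec_flush_buffers(), which discards
 * any frames buffered from before the seek point. */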
2132

    
2133
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2134
{
2135
    VideoState *is;
2136

    
2137
    is = av_mallocz(sizeof(VideoState));
2138
    if (!is)
2139
        return NULL;
2140
    av_strlcpy(is->filename, filename, sizeof(is->filename));
2141
    is->iformat = iformat;
2142
    is->ytop = 0;
2143
    is->xleft = 0;
2144

    
2145
    /* start video display */
2146
    is->pictq_mutex = SDL_CreateMutex();
2147
    is->pictq_cond = SDL_CreateCond();
2148

    
2149
    is->subpq_mutex = SDL_CreateMutex();
2150
    is->subpq_cond = SDL_CreateCond();
2151

    
2152
    /* add the refresh timer to draw the picture */
2153
    schedule_refresh(is, 40);
2154

    
2155
    is->av_sync_type = av_sync_type;
2156
    is->parse_tid = SDL_CreateThread(decode_thread, is);
2157
    if (!is->parse_tid) {
2158
        av_free(is);
2159
        return NULL;
2160
    }
2161
    return is;
2162
}
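
/* Thread layout resulting from stream_open(): the main thread runs
 * event_loop(), the parse thread created here runs decode_thread() and
 * demultiplexes packets into the queues, stream_component_open() later
 * starts video_thread()/subtitle_thread() for decoding, and SDL runs
 * sdl_audio_callback() on its own audio thread.  They communicate mainly
 * through the packet/picture queues protected by SDL mutexes. */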
2163

    
2164
static void stream_close(VideoState *is)
2165
{
2166
    VideoPicture *vp;
2167
    int i;
2168
    /* XXX: use a special url_shutdown call to abort parse cleanly */
2169
    is->abort_request = 1;
2170
    SDL_WaitThread(is->parse_tid, NULL);
2171

    
2172
    /* free all pictures */
2173
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2174
        vp = &is->pictq[i];
2175
        if (vp->bmp) {
2176
            SDL_FreeYUVOverlay(vp->bmp);
2177
            vp->bmp = NULL;
2178
        }
2179
    }
2180
    SDL_DestroyMutex(is->pictq_mutex);
2181
    SDL_DestroyCond(is->pictq_cond);
2182
    SDL_DestroyMutex(is->subpq_mutex);
2183
    SDL_DestroyCond(is->subpq_cond);
2184
}
2185

    
2186
static void stream_cycle_channel(VideoState *is, int codec_type)
2187
{
2188
    AVFormatContext *ic = is->ic;
2189
    int start_index, stream_index;
2190
    AVStream *st;
2191

    
2192
    if (codec_type == CODEC_TYPE_VIDEO)
2193
        start_index = is->video_stream;
2194
    else if (codec_type == CODEC_TYPE_AUDIO)
2195
        start_index = is->audio_stream;
2196
    else
2197
        start_index = is->subtitle_stream;
2198
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2199
        return;
2200
    stream_index = start_index;
2201
    for(;;) {
2202
        if (++stream_index >= is->ic->nb_streams)
2203
        {
2204
            if (codec_type == CODEC_TYPE_SUBTITLE)
2205
            {
2206
                stream_index = -1;
2207
                goto the_end;
2208
            } else
2209
                stream_index = 0;
2210
        }
2211
        if (stream_index == start_index)
2212
            return;
2213
        st = ic->streams[stream_index];
2214
        if (st->codec->codec_type == codec_type) {
2215
            /* check that parameters are OK */
2216
            switch(codec_type) {
2217
            case CODEC_TYPE_AUDIO:
2218
                if (st->codec->sample_rate != 0 &&
2219
                    st->codec->channels != 0)
2220
                    goto the_end;
2221
                break;
2222
            case CODEC_TYPE_VIDEO:
2223
            case CODEC_TYPE_SUBTITLE:
2224
                goto the_end;
2225
            default:
2226
                break;
2227
            }
2228
        }
2229
    }
2230
 the_end:
2231
    stream_component_close(is, start_index);
2232
    stream_component_open(is, stream_index);
2233
}
2234

    
2235

    
2236
static void toggle_full_screen(void)
2237
{
2238
    is_full_screen = !is_full_screen;
2239
    if (!fs_screen_width) {
2240
        /* use default SDL method */
2241
//        SDL_WM_ToggleFullScreen(screen);
2242
    }
2243
    video_open(cur_stream);
2244
}
2245

    
2246
static void toggle_pause(void)
2247
{
2248
    if (cur_stream)
2249
        stream_pause(cur_stream);
2250
    step = 0;
2251
}
2252

    
2253
static void step_to_next_frame(void)
2254
{
2255
    if (cur_stream) {
2256
        /* if the stream is paused, unpause it, then step */
2257
        if (cur_stream->paused)
2258
            stream_pause(cur_stream);
2259
    }
2260
    step = 1;
2261
}
2262

    
2263
static void do_exit(void)
2264
{
2265
    if (cur_stream) {
2266
        stream_close(cur_stream);
2267
        cur_stream = NULL;
2268
    }
2269
    if (show_status)
2270
        printf("\n");
2271
    SDL_Quit();
2272
    exit(0);
2273
}
2274

    
2275
static void toggle_audio_display(void)
2276
{
2277
    if (cur_stream) {
2278
        cur_stream->show_audio = !cur_stream->show_audio;
2279
    }
2280
}
2281

    
2282
/* handle an event sent by the GUI */
2283
static void event_loop(void)
2284
{
2285
    SDL_Event event;
2286
    double incr, pos, frac;
2287

    
2288
    for(;;) {
2289
        SDL_WaitEvent(&event);
2290
        switch(event.type) {
2291
        case SDL_KEYDOWN:
2292
            switch(event.key.keysym.sym) {
2293
            case SDLK_ESCAPE:
2294
            case SDLK_q:
2295
                do_exit();
2296
                break;
2297
            case SDLK_f:
2298
                toggle_full_screen();
2299
                break;
2300
            case SDLK_p:
2301
            case SDLK_SPACE:
2302
                toggle_pause();
2303
                break;
2304
            case SDLK_s: //S: Step to next frame
2305
                step_to_next_frame();
2306
                break;
2307
            case SDLK_a:
2308
                if (cur_stream)
2309
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2310
                break;
2311
            case SDLK_v:
2312
                if (cur_stream)
2313
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2314
                break;
2315
            case SDLK_t:
2316
                if (cur_stream)
2317
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2318
                break;
2319
            case SDLK_w:
2320
                toggle_audio_display();
2321
                break;
2322
            case SDLK_LEFT:
2323
                incr = -10.0;
2324
                goto do_seek;
2325
            case SDLK_RIGHT:
2326
                incr = 10.0;
2327
                goto do_seek;
2328
            case SDLK_UP:
2329
                incr = 60.0;
2330
                goto do_seek;
2331
            case SDLK_DOWN:
2332
                incr = -60.0;
2333
            do_seek:
2334
                if (cur_stream) {
2335
                    if (seek_by_bytes) {
2336
                        pos = url_ftell(cur_stream->ic->pb);
2337
                        if (cur_stream->ic->bit_rate)
2338
                            incr *= cur_stream->ic->bit_rate / 60.0;
2339
                        else
2340
                            incr *= 180000.0;
2341
                        pos += incr;
2342
                        stream_seek(cur_stream, pos, incr);
2343
                    } else {
2344
                        pos = get_master_clock(cur_stream);
2345
                        pos += incr;
2346
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2347
                    }
2348
                }
2349
                break;
2350
            default:
2351
                break;
2352
            }
2353
            break;
2354
        case SDL_MOUSEBUTTONDOWN:
2355
            if (cur_stream) {
2356
                int ns, hh, mm, ss;
2357
                int tns, thh, tmm, tss;
2358
                tns = cur_stream->ic->duration/1000000LL;
2359
                thh = tns/3600;
2360
                tmm = (tns%3600)/60;
2361
                tss = (tns%60);
2362
                frac = (double)event.button.x/(double)cur_stream->width;
2363
                ns = frac*tns;
2364
                hh = ns/3600;
2365
                mm = (ns%3600)/60;
2366
                ss = (ns%60);
2367
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2368
                        hh, mm, ss, thh, tmm, tss);
2369
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2370
            }
2371
            break;
2372
        case SDL_VIDEORESIZE:
2373
            if (cur_stream) {
2374
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2375
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2376
                screen_width = cur_stream->width = event.resize.w;
2377
                screen_height= cur_stream->height= event.resize.h;
2378
            }
2379
            break;
2380
        case SDL_QUIT:
2381
        case FF_QUIT_EVENT:
2382
            do_exit();
2383
            break;
2384
        case FF_ALLOC_EVENT:
2385
            video_open(event.user.data1);
2386
            alloc_picture(event.user.data1);
2387
            break;
2388
        case FF_REFRESH_EVENT:
2389
            video_refresh_timer(event.user.data1);
2390
            break;
2391
        default:
2392
            break;
2393
        }
2394
    }
2395
}
2396

    
2397
static void opt_frame_size(const char *arg)
2398
{
2399
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2400
        fprintf(stderr, "Incorrect frame size\n");
2401
        exit(1);
2402
    }
2403
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2404
        fprintf(stderr, "Frame size must be a multiple of 2\n");
2405
        exit(1);
2406
    }
2407
}
2408

    
2409
static int opt_width(const char *opt, const char *arg)
2410
{
2411
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2412
    return 0;
2413
}
2414

    
2415
static int opt_height(const char *opt, const char *arg)
2416
{
2417
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2418
    return 0;
2419
}
2420

    
2421
static void opt_format(const char *arg)
2422
{
2423
    file_iformat = av_find_input_format(arg);
2424
    if (!file_iformat) {
2425
        fprintf(stderr, "Unknown input format: %s\n", arg);
2426
        exit(1);
2427
    }
2428
}
2429

    
2430
static void opt_frame_pix_fmt(const char *arg)
2431
{
2432
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
2433
}
2434

    
2435
static int opt_sync(const char *opt, const char *arg)
2436
{
2437
    if (!strcmp(arg, "audio"))
2438
        av_sync_type = AV_SYNC_AUDIO_MASTER;
2439
    else if (!strcmp(arg, "video"))
2440
        av_sync_type = AV_SYNC_VIDEO_MASTER;
2441
    else if (!strcmp(arg, "ext"))
2442
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2443
    else {
2444
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2445
        exit(1);
2446
    }
2447
    return 0;
2448
}
2449

    
2450
static int opt_seek(const char *opt, const char *arg)
2451
{
2452
    start_time = parse_time_or_die(opt, arg, 1);
2453
    return 0;
2454
}
2455

    
2456
static int opt_debug(const char *opt, const char *arg)
2457
{
2458
    av_log_set_level(99);
2459
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2460
    return 0;
2461
}
2462

    
2463
static int opt_vismv(const char *opt, const char *arg)
2464
{
2465
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2466
    return 0;
2467
}
2468

    
2469
static int opt_thread_count(const char *opt, const char *arg)
2470
{
2471
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2472
#if !defined(HAVE_THREADS)
2473
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2474
#endif
2475
    return 0;
2476
}
2477

    
2478
static const OptionDef options[] = {
2479
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2480
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2481
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2482
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2483
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2484
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2485
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2486
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2487
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2488
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2489
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2490
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2491
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2492
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2493
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2494
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2495
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2496
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2497
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2498
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2499
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2500
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2501
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2502
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2503
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2504
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2505
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2506
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2507
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2508
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
2509
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2510
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2511
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2512
    { NULL, },
2513
};
2514

    
2515
static void show_help(void)
2516
{
2517
    printf("usage: ffplay [options] input_file\n"
2518
           "Simple media player\n");
2519
    printf("\n");
2520
    show_help_options(options, "Main options:\n",
2521
                      OPT_EXPERT, 0);
2522
    show_help_options(options, "\nAdvanced options:\n",
2523
                      OPT_EXPERT, OPT_EXPERT);
2524
    printf("\nWhile playing:\n"
2525
           "q, ESC              quit\n"
2526
           "f                   toggle full screen\n"
2527
           "p, SPC              pause\n"
2528
           "a                   cycle audio channel\n"
2529
           "v                   cycle video channel\n"
2530
           "t                   cycle subtitle channel\n"
2531
           "w                   show audio waves\n"
2532
           "left/right          seek backward/forward 10 seconds\n"
2533
           "down/up             seek backward/forward 1 minute\n"
2534
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2535
           );
2536
}
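
/* A few illustrative invocations (hypothetical file names) using the
 * options defined above:
 *
 *     ffplay movie.mpg                        # play with default audio-master sync
 *     ffplay -ss 00:01:30 movie.mpg           # start 90 seconds into the file
 *     ffplay -sync video -an movie.mpg        # video-master sync, audio disabled
 *     ffplay -f mpegts udp://localhost:1234   # force the input format
 */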
2537

    
2538
static void opt_input_file(const char *filename)
2539
{
2540
    if (!strcmp(filename, "-"))
2541
        filename = "pipe:";
2542
    input_filename = filename;
2543
}
2544

    
2545
/* main entry point */
2546
int main(int argc, char **argv)
2547
{
2548
    int flags;
2549

    
2550
    /* register all codecs, demuxers and protocols */
2551
    avcodec_register_all();
2552
    avdevice_register_all();
2553
    av_register_all();
2554

    
2555
    show_banner();
2556

    
2557
    parse_options(argc, argv, options, opt_input_file);
2558

    
2559
    if (!input_filename) {
2560
        show_help();
2561
        exit(1);
2562
    }
2563

    
2564
    if (display_disable) {
2565
        video_disable = 1;
2566
    }
2567
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2568
#if !defined(__MINGW32__) && !defined(__APPLE__)
2569
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2570
#endif
2571
    if (SDL_Init (flags)) {
2572
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2573
        exit(1);
2574
    }
2575

    
2576
    if (!display_disable) {
2577
#ifdef HAVE_SDL_VIDEO_SIZE
2578
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2579
        fs_screen_width = vi->current_w;
2580
        fs_screen_height = vi->current_h;
2581
#endif
2582
    }
2583

    
2584
    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2585
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2586
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2587
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2588

    
2589
    av_init_packet(&flush_pkt);
2590
    flush_pkt.data= "FLUSH";
2591

    
2592
    cur_stream = stream_open(input_filename, file_iformat);
2593

    
2594
    event_loop();
2595

    
2596
    /* never returns */
2597

    
2598
    return 0;
2599
}