ffmpeg / ffplay.c @ 26534fe8

/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavformat/avformat.h"
#include "libavformat/rtsp.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/opt.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if too big error */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffersize size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= 0;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
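
/* Illustrative use of the queue API above (a sketch, not code taken from
 * this file): the demux thread acts as the producer and the decoder
 * threads as blocking consumers, which is how the queues are driven
 * further below in this file.
 *
 *     PacketQueue q;
 *     packet_queue_init(&q);
 *     packet_queue_put(&q, &pkt);                 // demuxer side
 *     while (packet_queue_get(&q, &pkt, 1) > 0) { // decoder side, blocking
 *         ...decode pkt...
 *         av_free_packet(&pkt);
 *     }
 *     packet_queue_abort(&q);                     // unblocks a waiting consumer
 *     packet_queue_end(&q);
 */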

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif



#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}
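
/* Worked example for the CCIR macros above (illustration only): with
 * SCALEBITS = 10 the coefficients are fixed-point values with 10
 * fractional bits, so RGB_TO_Y_CCIR(255, 255, 255) evaluates to 235 and
 * RGB_TO_Y_CCIR(0, 0, 0) to 16, i.e. the limited-range (16..235) luma
 * expected by the YUV overlay; subtitle_thread() below uses these macros
 * to convert RGBA subtitle palettes to YUVA. */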


#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);

        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
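
/* For illustration: with 44100 Hz stereo s16 audio, bytes_per_sec is
 * 44100 * 2 * 2 = 176400. If audio_write_get_buf_size() reports 8192
 * bytes still queued, get_audio_clock() returns the decoded clock minus
 * 8192 / 176400 ~= 0.046 s, i.e. roughly the time of the sample being
 * played rather than of the last sample decoded. */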

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (!is->paused) {
        is->video_current_pts = get_video_clock(is);
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        ref_clock = get_master_clock(is);
        diff = frame_current_pts - ref_clock;

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
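
/* For illustration: with frames 40 ms apart (25 fps), sync_threshold is
 * max(AV_SYNC_THRESHOLD, 0.040) = 0.040. A frame whose pts lags the master
 * clock by more than 40 ms is shown as soon as possible (delay = 0), one
 * that leads by more than 40 ms has its delay doubled to 80 ms, and
 * anything in between keeps the nominal 40 ms. */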

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* launch timer for next picture */
            schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUYV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

       /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or external master clock) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correction the synchro */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
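
/* For illustration: at 44100 Hz stereo s16 (n = 4), once the averaged A-V
 * difference exceeds audio_diff_threshold, a current diff of +0.02 s asks
 * for wanted_size = samples_size + (int)(0.02 * 44100) * 4 =
 * samples_size + 3528 bytes, but the result is clamped to within
 * SAMPLE_CORRECTION_PERCENT_MAX (10%) of the original buffer so the speed
 * change stays hard to hear. */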
1575

    
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* if a pts is present, update the audio clock with it */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in samples. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


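/* SDL calls this callback from its own audio thread each time the hardware
   buffer needs 'len' more bytes; we refill it from audio_decode_frame() and
   fall back to silence on decode errors so the device always receives data */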
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

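/* stream_component_open() applies the command line codec options, opens the
   decoder for the chosen stream, then starts the matching consumer: the SDL
   audio device for audio streams, or a dedicated SDL thread for video and
   subtitle decoding */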
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

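    /* note: SDL_OpenAudio() below is free to adjust the requested spec; we
       record only the buffer size it actually grants (spec.size, in bytes)
       so the audio clock can account for data still queued in the hardware
       buffer */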
    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold
           (twice the SDL buffer duration, e.g. 2 * 1024 samples at
           44100 Hz is roughly 46 ms) */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

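/* close a stream component: abort its packet queue, tear down the consumer
   (SDL audio device or decoding thread), and only then free the queue so no
   thread can still be blocked on it */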
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this condition to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this condition to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    AVMetadataTag *tag = NULL;
    while ((tag=av_metadata_get(s->metadata,"",tag,AV_METADATA_IGNORE_SUFFIX)))
        fprintf(stderr, "%s: %s\n", tag->key, tag->value);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

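/* the demuxer loop below drives the whole player: it opens the input, picks
   one stream per media type, then keeps the audio/video/subtitle packet
   queues topped up while honouring pause, seek and abort requests from the
   main thread */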
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking was requested, execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
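        /* a pending seek is serviced here, on the demuxer thread: after the
           format-level seek the per-stream queues are flushed and a flush_pkt
           marker is queued so each decoder resets its internal state */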
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding not being done in the correct direction when
//      generating the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
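        /* at end of file, keep queueing an empty packet for the video stream
           so the decoder can drain its last buffered frames */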
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

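/* allocate the VideoState, create the picture/subtitle queue primitives,
   schedule the first display refresh and spawn the demuxer thread; returns
   NULL on allocation or thread-creation failure */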
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

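/* switch to the next stream of the given type, wrapping around; for
   subtitles the search may also land on "no stream" (index -1), which
   effectively disables them */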
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE));
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* program entry point */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}