ffmpeg / ffplay.c @ db4fac64

1
/*
2
 * FFplay : Simple Media Player based on the ffmpeg libraries
3
 * Copyright (c) 2003 Fabrice Bellard
4
 *
5
 * This file is part of FFmpeg.
6
 *
7
 * FFmpeg is free software; you can redistribute it and/or
8
 * modify it under the terms of the GNU Lesser General Public
9
 * License as published by the Free Software Foundation; either
10
 * version 2.1 of the License, or (at your option) any later version.
11
 *
12
 * FFmpeg is distributed in the hope that it will be useful,
13
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15
 * Lesser General Public License for more details.
16
 *
17
 * You should have received a copy of the GNU Lesser General Public
18
 * License along with FFmpeg; if not, write to the Free Software
19
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20
 */
21

    
22
#include <math.h>
23
#include <limits.h>
24
#include "libavutil/avstring.h"
25
#include "libavformat/avformat.h"
26
#include "libavformat/rtsp.h"
27
#include "libavdevice/avdevice.h"
28
#include "libswscale/swscale.h"
29
#include "libavcodec/audioconvert.h"
30
#include "libavcodec/opt.h"
31

    
32
#include "cmdutils.h"
33

    
34
#include <SDL.h>
35
#include <SDL_thread.h>
36

    
37
#ifdef __MINGW32__
38
#undef main /* We don't want SDL to override our main() */
39
#endif
40

    
41
#undef exit
42

    
43
const char program_name[] = "FFplay";
44
const int program_birth_year = 2003;
45

    
46
//#define DEBUG_SYNC
47

    
48
#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
49
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
50
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51

    
52
/* SDL audio buffer size, in samples. Should be small to have precise
53
   A/V sync as SDL does not have hardware buffer fullness info. */
54
#define SDL_AUDIO_BUFFER_SIZE 1024
55

    
56
/* no AV sync correction is done if below the AV sync threshold */
57
#define AV_SYNC_THRESHOLD 0.01
58
/* no AV correction is done if the error is too big */
59
#define AV_NOSYNC_THRESHOLD 10.0
60

    
61
/* maximum audio speed change to get correct sync */
62
#define SAMPLE_CORRECTION_PERCENT_MAX 10
63

    
64
/* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
65
#define AUDIO_DIFF_AVG_NB   20
66

    
67
/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
68
#define SAMPLE_ARRAY_SIZE (2*65536)
69

    
70
static int sws_flags = SWS_BICUBIC;
71

    
72
typedef struct PacketQueue {
73
    AVPacketList *first_pkt, *last_pkt;
74
    int nb_packets;
75
    int size;
76
    int abort_request;
77
    SDL_mutex *mutex;
78
    SDL_cond *cond;
79
} PacketQueue;
80

    
81
#define VIDEO_PICTURE_QUEUE_SIZE 1
82
#define SUBPICTURE_QUEUE_SIZE 4
83

    
84
typedef struct VideoPicture {
85
    double pts;                                  ///<presentation time stamp for this picture
86
    SDL_Overlay *bmp;
87
    int width, height; /* source height & width */
88
    int allocated;
89
} VideoPicture;
90

    
91
typedef struct SubPicture {
92
    double pts; /* presentation time stamp for this picture */
93
    AVSubtitle sub;
94
} SubPicture;
95

    
96
enum {
97
    AV_SYNC_AUDIO_MASTER, /* default choice */
98
    AV_SYNC_VIDEO_MASTER,
99
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
100
};
101

    
102
typedef struct VideoState {
103
    SDL_Thread *parse_tid;
104
    SDL_Thread *video_tid;
105
    AVInputFormat *iformat;
106
    int no_background;
107
    int abort_request;
108
    int paused;
109
    int last_paused;
110
    int seek_req;
111
    int seek_flags;
112
    int64_t seek_pos;
113
    AVFormatContext *ic;
114
    int dtg_active_format;
115

    
116
    int audio_stream;
117

    
118
    int av_sync_type;
119
    double external_clock; /* external clock base */
120
    int64_t external_clock_time;
121

    
122
    double audio_clock;
123
    double audio_diff_cum; /* used for AV difference average computation */
124
    double audio_diff_avg_coef;
125
    double audio_diff_threshold;
126
    int audio_diff_avg_count;
127
    AVStream *audio_st;
128
    PacketQueue audioq;
129
    int audio_hw_buf_size;
130
    /* samples output by the codec. we reserve more space for avsync
131
       compensation */
132
    DECLARE_ALIGNED(16,uint8_t,audio_buf1[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
133
    DECLARE_ALIGNED(16,uint8_t,audio_buf2[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
134
    uint8_t *audio_buf;
135
    unsigned int audio_buf_size; /* in bytes */
136
    int audio_buf_index; /* in bytes */
137
    AVPacket audio_pkt;
138
    uint8_t *audio_pkt_data;
139
    int audio_pkt_size;
140
    enum SampleFormat audio_src_fmt;
141
    AVAudioConvert *reformat_ctx;
142

    
143
    int show_audio; /* if true, display audio samples */
144
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
145
    int sample_array_index;
146
    int last_i_start;
147

    
148
    SDL_Thread *subtitle_tid;
149
    int subtitle_stream;
150
    int subtitle_stream_changed;
151
    AVStream *subtitle_st;
152
    PacketQueue subtitleq;
153
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
154
    int subpq_size, subpq_rindex, subpq_windex;
155
    SDL_mutex *subpq_mutex;
156
    SDL_cond *subpq_cond;
157

    
158
    double frame_timer;
159
    double frame_last_pts;
160
    double frame_last_delay;
161
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
162
    int video_stream;
163
    AVStream *video_st;
164
    PacketQueue videoq;
165
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
166
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
167
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
168
    int pictq_size, pictq_rindex, pictq_windex;
169
    SDL_mutex *pictq_mutex;
170
    SDL_cond *pictq_cond;
171

    
172
    //    QETimer *video_timer;
173
    char filename[1024];
174
    int width, height, xleft, ytop;
175
} VideoState;
176

    
177
static void show_help(void);
178
static int audio_write_get_buf_size(VideoState *is);
179

    
180
/* options specified by the user */
181
static AVInputFormat *file_iformat;
182
static const char *input_filename;
183
static int fs_screen_width;
184
static int fs_screen_height;
185
static int screen_width = 0;
186
static int screen_height = 0;
187
static int frame_width = 0;
188
static int frame_height = 0;
189
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
190
static int audio_disable;
191
static int video_disable;
192
static int wanted_audio_stream= 0;
193
static int wanted_video_stream= 0;
194
static int seek_by_bytes;
195
static int display_disable;
196
static int show_status;
197
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
198
static int64_t start_time = AV_NOPTS_VALUE;
199
static int debug = 0;
200
static int debug_mv = 0;
201
static int step = 0;
202
static int thread_count = 1;
203
static int workaround_bugs = 1;
204
static int fast = 0;
205
static int genpts = 0;
206
static int lowres = 0;
207
static int idct = FF_IDCT_AUTO;
208
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
209
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
210
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
211
static int error_recognition = FF_ER_CAREFUL;
212
static int error_concealment = 3;
213
static int decoder_reorder_pts= 0;
214

    
215
/* current context */
216
static int is_full_screen;
217
static VideoState *cur_stream;
218
static int64_t audio_callback_time;
219

    
220
static AVPacket flush_pkt;
221

    
222
#define FF_ALLOC_EVENT   (SDL_USEREVENT)
223
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
224
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
225

    
226
static SDL_Surface *screen;
227

    
228
/* packet queue handling */
229
static void packet_queue_init(PacketQueue *q)
230
{
231
    memset(q, 0, sizeof(PacketQueue));
232
    q->mutex = SDL_CreateMutex();
233
    q->cond = SDL_CreateCond();
234
}
235

    
236
static void packet_queue_flush(PacketQueue *q)
237
{
238
    AVPacketList *pkt, *pkt1;
239

    
240
    SDL_LockMutex(q->mutex);
241
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
242
        pkt1 = pkt->next;
243
        av_free_packet(&pkt->pkt);
244
        av_freep(&pkt);
245
    }
246
    q->last_pkt = NULL;
247
    q->first_pkt = NULL;
248
    q->nb_packets = 0;
249
    q->size = 0;
250
    SDL_UnlockMutex(q->mutex);
251
}
252

    
253
static void packet_queue_end(PacketQueue *q)
254
{
255
    packet_queue_flush(q);
256
    SDL_DestroyMutex(q->mutex);
257
    SDL_DestroyCond(q->cond);
258
}
259

    
260
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
261
{
262
    AVPacketList *pkt1;
263

    
264
    /* duplicate the packet */
265
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
266
        return -1;
267

    
268
    pkt1 = av_malloc(sizeof(AVPacketList));
269
    if (!pkt1)
270
        return -1;
271
    pkt1->pkt = *pkt;
272
    pkt1->next = NULL;
273

    
274

    
275
    SDL_LockMutex(q->mutex);
276

    
277
    if (!q->last_pkt)
278

    
279
        q->first_pkt = pkt1;
280
    else
281
        q->last_pkt->next = pkt1;
282
    q->last_pkt = pkt1;
283
    q->nb_packets++;
284
    q->size += pkt1->pkt.size;
285
    /* XXX: should duplicate packet data in DV case */
286
    SDL_CondSignal(q->cond);
287

    
288
    SDL_UnlockMutex(q->mutex);
289
    return 0;
290
}
291

    
292
static void packet_queue_abort(PacketQueue *q)
293
{
294
    SDL_LockMutex(q->mutex);
295

    
296
    q->abort_request = 1;
297

    
298
    SDL_CondSignal(q->cond);
299

    
300
    SDL_UnlockMutex(q->mutex);
301
}
302

    
303
/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
304
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
305
{
306
    AVPacketList *pkt1;
307
    int ret;
308

    
309
    SDL_LockMutex(q->mutex);
310

    
311
    for(;;) {
312
        if (q->abort_request) {
313
            ret = -1;
314
            break;
315
        }
316

    
317
        pkt1 = q->first_pkt;
318
        if (pkt1) {
319
            q->first_pkt = pkt1->next;
320
            if (!q->first_pkt)
321
                q->last_pkt = NULL;
322
            q->nb_packets--;
323
            q->size -= pkt1->pkt.size;
324
            *pkt = pkt1->pkt;
325
            av_free(pkt1);
326
            ret = 1;
327
            break;
328
        } else if (!block) {
329
            ret = 0;
330
            break;
331
        } else {
332
            SDL_CondWait(q->cond, q->mutex);
333
        }
334
    }
335
    SDL_UnlockMutex(q->mutex);
336
    return ret;
337
}
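
/* Illustrative usage sketch (added note, not part of the original source): the
   PacketQueue is a small producer/consumer channel protected by an SDL
   mutex/condition pair.  The demuxer thread pushes packets and a decoder
   thread pops them, blocking until data arrives or packet_queue_abort() is
   called:

       PacketQueue q;
       AVPacket pkt;

       packet_queue_init(&q);
       // producer (demuxer thread):
       //     if (av_read_frame(ic, &pkt) >= 0) packet_queue_put(&q, &pkt);
       // consumer (decoder thread):
       //     if (packet_queue_get(&q, &pkt, 1) > 0) { ...decode...; av_free_packet(&pkt); }
       // shutdown:
       //     packet_queue_abort(&q); packet_queue_end(&q);
*/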
338

    
339
static inline void fill_rectangle(SDL_Surface *screen,
340
                                  int x, int y, int w, int h, int color)
341
{
342
    SDL_Rect rect;
343
    rect.x = x;
344
    rect.y = y;
345
    rect.w = w;
346
    rect.h = h;
347
    SDL_FillRect(screen, &rect, color);
348
}
349

    
350
#if 0
351
/* draw only the border of a rectangle */
352
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
353
{
354
    int w1, w2, h1, h2;
355

356
    /* fill the background */
357
    w1 = x;
358
    if (w1 < 0)
359
        w1 = 0;
360
    w2 = s->width - (x + w);
361
    if (w2 < 0)
362
        w2 = 0;
363
    h1 = y;
364
    if (h1 < 0)
365
        h1 = 0;
366
    h2 = s->height - (y + h);
367
    if (h2 < 0)
368
        h2 = 0;
369
    fill_rectangle(screen,
370
                   s->xleft, s->ytop,
371
                   w1, s->height,
372
                   color);
373
    fill_rectangle(screen,
374
                   s->xleft + s->width - w2, s->ytop,
375
                   w2, s->height,
376
                   color);
377
    fill_rectangle(screen,
378
                   s->xleft + w1, s->ytop,
379
                   s->width - w1 - w2, h1,
380
                   color);
381
    fill_rectangle(screen,
382
                   s->xleft + w1, s->ytop + s->height - h2,
383
                   s->width - w1 - w2, h2,
384
                   color);
385
}
386
#endif
387

    
388

    
389

    
390
#define SCALEBITS 10
391
#define ONE_HALF  (1 << (SCALEBITS - 1))
392
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))
393

    
394
#define RGB_TO_Y_CCIR(r, g, b) \
395
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
396
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
397

    
398
#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
399
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
400
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401

    
402
#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
403
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
404
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
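
/* Background note (added for clarity, not in the original source): these macros
   implement the CCIR 601 "studio range" RGB -> YCbCr conversion in fixed point,
   i.e. roughly
       Y  = 16  + 219/255 * ( 0.299 R + 0.587 G + 0.114 B)
       Cb = 128 + 224/255 * (-0.169 R - 0.331 G + 0.500 B)
       Cr = 128 + 224/255 * ( 0.500 R - 0.419 G - 0.081 B)
   with the coefficients scaled by 2^SCALEBITS.  For example
   RGB_TO_Y_CCIR(255,255,255) evaluates to 235 and RGB_TO_Y_CCIR(0,0,0) to 16,
   the CCIR 601 white and black levels. */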
405

    
406
#define ALPHA_BLEND(a, oldp, newp, s)\
407
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
408

    
409
#define RGBA_IN(r, g, b, a, s)\
410
{\
411
    unsigned int v = ((const uint32_t *)(s))[0];\
412
    a = (v >> 24) & 0xff;\
413
    r = (v >> 16) & 0xff;\
414
    g = (v >> 8) & 0xff;\
415
    b = v & 0xff;\
416
}
417

    
418
#define YUVA_IN(y, u, v, a, s, pal)\
419
{\
420
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
421
    a = (val >> 24) & 0xff;\
422
    y = (val >> 16) & 0xff;\
423
    u = (val >> 8) & 0xff;\
424
    v = val & 0xff;\
425
}
426

    
427
#define YUVA_OUT(d, y, u, v, a)\
428
{\
429
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
430
}
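
/* Added note: RGBA_IN unpacks a packed 0xAARRGGBB value, so for the pixel
   0x80FF0000 it yields a=0x80, r=0xFF, g=0x00, b=0x00.  YUVA_IN does the same
   through a palette lookup: the byte at 's' indexes 'pal', whose entries are
   stored as 0xAAYYUUVV once the palette has been converted from RGBA by
   YUVA_OUT in subtitle_thread(). */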
431

    
432

    
433
#define BPP 1
434

    
435
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
436
{
437
    int wrap, wrap3, width2, skip2;
438
    int y, u, v, a, u1, v1, a1, w, h;
439
    uint8_t *lum, *cb, *cr;
440
    const uint8_t *p;
441
    const uint32_t *pal;
442
    int dstx, dsty, dstw, dsth;
443

    
444
    dstw = av_clip(rect->w, 0, imgw);
445
    dsth = av_clip(rect->h, 0, imgh);
446
    dstx = av_clip(rect->x, 0, imgw - dstw);
447
    dsty = av_clip(rect->y, 0, imgh - dsth);
448
    lum = dst->data[0] + dsty * dst->linesize[0];
449
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
450
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
451

    
452
    width2 = (dstw + 1) >> 1;
453
    skip2 = dstx >> 1;
454
    wrap = dst->linesize[0];
455
    wrap3 = rect->linesize;
456
    p = rect->bitmap;
457
    pal = rect->rgba_palette;  /* Now in YCrCb! */
458

    
459
    if (dsty & 1) {
460
        lum += dstx;
461
        cb += skip2;
462
        cr += skip2;
463

    
464
        if (dstx & 1) {
465
            YUVA_IN(y, u, v, a, p, pal);
466
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
467
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
468
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
469
            cb++;
470
            cr++;
471
            lum++;
472
            p += BPP;
473
        }
474
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
475
            YUVA_IN(y, u, v, a, p, pal);
476
            u1 = u;
477
            v1 = v;
478
            a1 = a;
479
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
480

    
481
            YUVA_IN(y, u, v, a, p + BPP, pal);
482
            u1 += u;
483
            v1 += v;
484
            a1 += a;
485
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
486
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
487
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
488
            cb++;
489
            cr++;
490
            p += 2 * BPP;
491
            lum += 2;
492
        }
493
        if (w) {
494
            YUVA_IN(y, u, v, a, p, pal);
495
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
496
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
497
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
498
        }
499
        p += wrap3 + (wrap3 - dstw * BPP);
500
        lum += wrap + (wrap - dstw - dstx);
501
        cb += dst->linesize[1] - width2 - skip2;
502
        cr += dst->linesize[2] - width2 - skip2;
503
    }
504
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505
        lum += dstx;
506
        cb += skip2;
507
        cr += skip2;
508

    
509
        if (dstx & 1) {
510
            YUVA_IN(y, u, v, a, p, pal);
511
            u1 = u;
512
            v1 = v;
513
            a1 = a;
514
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515
            p += wrap3;
516
            lum += wrap;
517
            YUVA_IN(y, u, v, a, p, pal);
518
            u1 += u;
519
            v1 += v;
520
            a1 += a;
521
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524
            cb++;
525
            cr++;
526
            p += -wrap3 + BPP;
527
            lum += -wrap + 1;
528
        }
529
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530
            YUVA_IN(y, u, v, a, p, pal);
531
            u1 = u;
532
            v1 = v;
533
            a1 = a;
534
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535

    
536
            YUVA_IN(y, u, v, a, p, pal);
537
            u1 += u;
538
            v1 += v;
539
            a1 += a;
540
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541
            p += wrap3;
542
            lum += wrap;
543

    
544
            YUVA_IN(y, u, v, a, p, pal);
545
            u1 += u;
546
            v1 += v;
547
            a1 += a;
548
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549

    
550
            YUVA_IN(y, u, v, a, p, pal);
551
            u1 += u;
552
            v1 += v;
553
            a1 += a;
554
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555

    
556
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558

    
559
            cb++;
560
            cr++;
561
            p += -wrap3 + 2 * BPP;
562
            lum += -wrap + 2;
563
        }
564
        if (w) {
565
            YUVA_IN(y, u, v, a, p, pal);
566
            u1 = u;
567
            v1 = v;
568
            a1 = a;
569
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570
            p += wrap3;
571
            lum += wrap;
572
            YUVA_IN(y, u, v, a, p, pal);
573
            u1 += u;
574
            v1 += v;
575
            a1 += a;
576
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579
            cb++;
580
            cr++;
581
            p += -wrap3 + BPP;
582
            lum += -wrap + 1;
583
        }
584
        p += wrap3 + (wrap3 - dstw * BPP);
585
        lum += wrap + (wrap - dstw - dstx);
586
        cb += dst->linesize[1] - width2 - skip2;
587
        cr += dst->linesize[2] - width2 - skip2;
588
    }
589
    /* handle odd height */
590
    if (h) {
591
        lum += dstx;
592
        cb += skip2;
593
        cr += skip2;
594

    
595
        if (dstx & 1) {
596
            YUVA_IN(y, u, v, a, p, pal);
597
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600
            cb++;
601
            cr++;
602
            lum++;
603
            p += BPP;
604
        }
605
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606
            YUVA_IN(y, u, v, a, p, pal);
607
            u1 = u;
608
            v1 = v;
609
            a1 = a;
610
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611

    
612
            YUVA_IN(y, u, v, a, p + BPP, pal);
613
            u1 += u;
614
            v1 += v;
615
            a1 += a;
616
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
618
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
619
            cb++;
620
            cr++;
621
            p += 2 * BPP;
622
            lum += 2;
623
        }
624
        if (w) {
625
            YUVA_IN(y, u, v, a, p, pal);
626
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
629
        }
630
    }
631
}
632

    
633
static void free_subpicture(SubPicture *sp)
634
{
635
    int i;
636

    
637
    for (i = 0; i < sp->sub.num_rects; i++)
638
    {
639
        av_freep(&sp->sub.rects[i]->bitmap);
640
        av_freep(&sp->sub.rects[i]->rgba_palette);
641
        av_freep(&sp->sub.rects[i]);
642
    }
643

    
644
    av_free(sp->sub.rects);
645

    
646
    memset(&sp->sub, 0, sizeof(AVSubtitle));
647
}
648

    
649
static void video_image_display(VideoState *is)
650
{
651
    VideoPicture *vp;
652
    SubPicture *sp;
653
    AVPicture pict;
654
    float aspect_ratio;
655
    int width, height, x, y;
656
    SDL_Rect rect;
657
    int i;
658

    
659
    vp = &is->pictq[is->pictq_rindex];
660
    if (vp->bmp) {
661
        /* XXX: use variable in the frame */
662
        if (is->video_st->sample_aspect_ratio.num)
663
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
664
        else if (is->video_st->codec->sample_aspect_ratio.num)
665
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
666
        else
667
            aspect_ratio = 0;
668
        if (aspect_ratio <= 0.0)
669
            aspect_ratio = 1.0;
670
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
671
        /* if an active format is indicated, then it overrides the
672
           mpeg format */
673
#if 0
674
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
675
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
676
            printf("dtg_active_format=%d\n", is->dtg_active_format);
677
        }
678
#endif
679
#if 0
680
        switch(is->video_st->codec->dtg_active_format) {
681
        case FF_DTG_AFD_SAME:
682
        default:
683
            /* nothing to do */
684
            break;
685
        case FF_DTG_AFD_4_3:
686
            aspect_ratio = 4.0 / 3.0;
687
            break;
688
        case FF_DTG_AFD_16_9:
689
            aspect_ratio = 16.0 / 9.0;
690
            break;
691
        case FF_DTG_AFD_14_9:
692
            aspect_ratio = 14.0 / 9.0;
693
            break;
694
        case FF_DTG_AFD_4_3_SP_14_9:
695
            aspect_ratio = 14.0 / 9.0;
696
            break;
697
        case FF_DTG_AFD_16_9_SP_14_9:
698
            aspect_ratio = 14.0 / 9.0;
699
            break;
700
        case FF_DTG_AFD_SP_4_3:
701
            aspect_ratio = 4.0 / 3.0;
702
            break;
703
        }
704
#endif
705

    
706
        if (is->subtitle_st)
707
        {
708
            if (is->subpq_size > 0)
709
            {
710
                sp = &is->subpq[is->subpq_rindex];
711

    
712
                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
713
                {
714
                    SDL_LockYUVOverlay (vp->bmp);
715

    
716
                    pict.data[0] = vp->bmp->pixels[0];
717
                    pict.data[1] = vp->bmp->pixels[2];
718
                    pict.data[2] = vp->bmp->pixels[1];
719

    
720
                    pict.linesize[0] = vp->bmp->pitches[0];
721
                    pict.linesize[1] = vp->bmp->pitches[2];
722
                    pict.linesize[2] = vp->bmp->pitches[1];
723

    
724
                    for (i = 0; i < sp->sub.num_rects; i++)
725
                        blend_subrect(&pict, sp->sub.rects[i],
726
                                      vp->bmp->w, vp->bmp->h);
727

    
728
                    SDL_UnlockYUVOverlay (vp->bmp);
729
                }
730
            }
731
        }
732

    
733

    
734
        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
735
        height = is->height;
736
        width = ((int)rint(height * aspect_ratio)) & ~1;
737
        if (width > is->width) {
738
            width = is->width;
739
            height = ((int)rint(width / aspect_ratio)) & ~1;
740
        }
741
        x = (is->width - width) / 2;
742
        y = (is->height - height) / 2;
743
        if (!is->no_background) {
744
            /* fill the background */
745
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
746
        } else {
747
            is->no_background = 0;
748
        }
749
        rect.x = is->xleft + x;
750
        rect.y = is->ytop  + y;
751
        rect.w = width;
752
        rect.h = height;
753
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
754
    } else {
755
#if 0
756
        fill_rectangle(screen,
757
                       is->xleft, is->ytop, is->width, is->height,
758
                       QERGB(0x00, 0x00, 0x00));
759
#endif
760
    }
761
}
762

    
763
static inline int compute_mod(int a, int b)
764
{
765
    a = a % b;
766
    if (a >= 0)
767
        return a;
768
    else
769
        return a + b;
770
}
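
/* Added note: unlike C's '%' operator, compute_mod() always returns a value in
   [0, b); e.g. compute_mod(-3, 10) == 7, whereas (-3 % 10) is -3 under C99
   truncation.  video_audio_display() relies on this when it steps backwards
   through the circular sample_array. */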
771

    
772
static void video_audio_display(VideoState *s)
773
{
774
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
775
    int ch, channels, h, h2, bgcolor, fgcolor;
776
    int16_t time_diff;
777

    
778
    /* compute display index : center on currently output samples */
779
    channels = s->audio_st->codec->channels;
780
    nb_display_channels = channels;
781
    if (!s->paused) {
782
        n = 2 * channels;
783
        delay = audio_write_get_buf_size(s);
784
        delay /= n;
785

    
786
        /* to be more precise, we take into account the time spent since
787
           the last buffer computation */
788
        if (audio_callback_time) {
789
            time_diff = av_gettime() - audio_callback_time;
790
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
791
        }
792

    
793
        delay -= s->width / 2;
794
        if (delay < s->width)
795
            delay = s->width;
796

    
797
        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
798

    
799
        h= INT_MIN;
800
        for(i=0; i<1000; i+=channels){
801
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
802
            int a= s->sample_array[idx];
803
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
804
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
805
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
806
            int score= a-d;
807
            if(h<score && (b^c)<0){
808
                h= score;
809
                i_start= idx;
810
            }
811
        }
812

    
813
        s->last_i_start = i_start;
814
    } else {
815
        i_start = s->last_i_start;
816
    }
817

    
818
    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
819
    fill_rectangle(screen,
820
                   s->xleft, s->ytop, s->width, s->height,
821
                   bgcolor);
822

    
823
    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
824

    
825
    /* total height for one channel */
826
    h = s->height / nb_display_channels;
827
    /* graph height / 2 */
828
    h2 = (h * 9) / 20;
829
    for(ch = 0;ch < nb_display_channels; ch++) {
830
        i = i_start + ch;
831
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
832
        for(x = 0; x < s->width; x++) {
833
            y = (s->sample_array[i] * h2) >> 15;
834
            if (y < 0) {
835
                y = -y;
836
                ys = y1 - y;
837
            } else {
838
                ys = y1;
839
            }
840
            fill_rectangle(screen,
841
                           s->xleft + x, ys, 1, y,
842
                           fgcolor);
843
            i += channels;
844
            if (i >= SAMPLE_ARRAY_SIZE)
845
                i -= SAMPLE_ARRAY_SIZE;
846
        }
847
    }
848

    
849
    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
850

    
851
    for(ch = 1;ch < nb_display_channels; ch++) {
852
        y = s->ytop + ch * h;
853
        fill_rectangle(screen,
854
                       s->xleft, y, s->width, 1,
855
                       fgcolor);
856
    }
857
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
858
}
859

    
860
static int video_open(VideoState *is){
861
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
862
    int w,h;
863

    
864
    if(is_full_screen) flags |= SDL_FULLSCREEN;
865
    else               flags |= SDL_RESIZABLE;
866

    
867
    if (is_full_screen && fs_screen_width) {
868
        w = fs_screen_width;
869
        h = fs_screen_height;
870
    } else if(!is_full_screen && screen_width){
871
        w = screen_width;
872
        h = screen_height;
873
    }else if (is->video_st && is->video_st->codec->width){
874
        w = is->video_st->codec->width;
875
        h = is->video_st->codec->height;
876
    } else {
877
        w = 640;
878
        h = 480;
879
    }
880
#ifndef __APPLE__
881
    screen = SDL_SetVideoMode(w, h, 0, flags);
882
#else
883
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
884
    screen = SDL_SetVideoMode(w, h, 24, flags);
885
#endif
886
    if (!screen) {
887
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
888
        return -1;
889
    }
890
    SDL_WM_SetCaption("FFplay", "FFplay");
891

    
892
    is->width = screen->w;
893
    is->height = screen->h;
894

    
895
    return 0;
896
}
897

    
898
/* display the current picture, if any */
899
static void video_display(VideoState *is)
900
{
901
    if(!screen)
902
        video_open(cur_stream);
903
    if (is->audio_st && is->show_audio)
904
        video_audio_display(is);
905
    else if (is->video_st)
906
        video_image_display(is);
907
}
908

    
909
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
910
{
911
    SDL_Event event;
912
    event.type = FF_REFRESH_EVENT;
913
    event.user.data1 = opaque;
914
    SDL_PushEvent(&event);
915
    return 0; /* 0 means stop timer */
916
}
917

    
918
/* schedule a video refresh in 'delay' ms */
919
static void schedule_refresh(VideoState *is, int delay)
920
{
921
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
922
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
923
}
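
/* Added note on the refresh mechanism: schedule_refresh() arms an SDL timer;
   when it fires, sdl_refresh_timer_cb() pushes an FF_REFRESH_EVENT carrying
   the VideoState pointer in event.user.data1, and the event loop is then
   expected to call video_refresh_timer() with that pointer.  A minimal sketch
   of the consumer side (assumed; the event loop itself is outside this
   excerpt):

       SDL_Event event;
       while (SDL_WaitEvent(&event)) {
           if (event.type == FF_REFRESH_EVENT)
               video_refresh_timer(event.user.data1);
       }
*/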
924

    
925
/* get the current audio clock value */
926
static double get_audio_clock(VideoState *is)
927
{
928
    double pts;
929
    int hw_buf_size, bytes_per_sec;
930
    pts = is->audio_clock;
931
    hw_buf_size = audio_write_get_buf_size(is);
932
    bytes_per_sec = 0;
933
    if (is->audio_st) {
934
        bytes_per_sec = is->audio_st->codec->sample_rate *
935
            2 * is->audio_st->codec->channels;
936
    }
937
    if (bytes_per_sec)
938
        pts -= (double)hw_buf_size / bytes_per_sec;
939
    return pts;
940
}
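
/* Worked example (added for clarity): with 44100 Hz stereo S16 audio,
   bytes_per_sec = 44100 * 2 * 2 = 176400.  If audio_write_get_buf_size()
   reports 8192 bytes still queued, the returned clock is audio_clock minus
   8192 / 176400 ~= 0.046 s, i.e. the pts of the sample currently being heard
   rather than of the last decoded one. */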
941

    
942
/* get the current video clock value */
943
static double get_video_clock(VideoState *is)
944
{
945
    double delta;
946
    if (is->paused) {
947
        delta = 0;
948
    } else {
949
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
950
    }
951
    return is->video_current_pts + delta;
952
}
953

    
954
/* get the current external clock value */
955
static double get_external_clock(VideoState *is)
956
{
957
    int64_t ti;
958
    ti = av_gettime();
959
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
960
}
961

    
962
/* get the current master clock value */
963
static double get_master_clock(VideoState *is)
964
{
965
    double val;
966

    
967
    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
968
        if (is->video_st)
969
            val = get_video_clock(is);
970
        else
971
            val = get_audio_clock(is);
972
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
973
        if (is->audio_st)
974
            val = get_audio_clock(is);
975
        else
976
            val = get_video_clock(is);
977
    } else {
978
        val = get_external_clock(is);
979
    }
980
    return val;
981
}
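
/* Added note: the master clock is whichever clock the configured av_sync_type
   selects, with a fallback when that stream is missing; e.g. with the default
   AV_SYNC_AUDIO_MASTER and no audio stream, the video clock is used instead.
   Both video_refresh_timer() and synchronize_audio() measure their drift
   against this value. */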
982

    
983
/* seek in the stream */
984
static void stream_seek(VideoState *is, int64_t pos, int rel)
985
{
986
    if (!is->seek_req) {
987
        is->seek_pos = pos;
988
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
989
        if (seek_by_bytes)
990
            is->seek_flags |= AVSEEK_FLAG_BYTE;
991
        is->seek_req = 1;
992
    }
993
}
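
/* Usage sketch (assumed; the key-handling code is outside this excerpt): a
   relative seek of 'incr' seconds is typically issued as

       double pos = get_master_clock(cur_stream);
       pos += incr;
       stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int)incr);

   Only the request is recorded here; the demuxer thread is expected to perform
   the actual seek once it notices seek_req. */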
994

    
995
/* pause or resume the video */
996
static void stream_pause(VideoState *is)
997
{
998
    is->paused = !is->paused;
999
    if (!is->paused) {
1000
        is->video_current_pts = get_video_clock(is);
1001
        is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
1002
    }
1003
}
1004

    
1005
/* called to display each frame */
1006
static void video_refresh_timer(void *opaque)
1007
{
1008
    VideoState *is = opaque;
1009
    VideoPicture *vp;
1010
    double actual_delay, delay, sync_threshold, ref_clock, diff;
1011

    
1012
    SubPicture *sp, *sp2;
1013

    
1014
    if (is->video_st) {
1015
        if (is->pictq_size == 0) {
1016
            /* if no picture, need to wait */
1017
            schedule_refresh(is, 1);
1018
        } else {
1019
            /* dequeue the picture */
1020
            vp = &is->pictq[is->pictq_rindex];
1021

    
1022
            /* update current video pts */
1023
            is->video_current_pts = vp->pts;
1024
            is->video_current_pts_time = av_gettime();
1025

    
1026
            /* compute nominal delay */
1027
            delay = vp->pts - is->frame_last_pts;
1028
            if (delay <= 0 || delay >= 10.0) {
1029
                /* if incorrect delay, use previous one */
1030
                delay = is->frame_last_delay;
1031
            }
1032
            is->frame_last_delay = delay;
1033
            is->frame_last_pts = vp->pts;
1034

    
1035
            /* update delay to follow master synchronisation source */
1036
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1037
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1038
                /* if video is slave, we try to correct big delays by
1039
                   duplicating or deleting a frame */
1040
                ref_clock = get_master_clock(is);
1041
                diff = vp->pts - ref_clock;
1042

    
1043
                /* skip or repeat frame. We take into account the
1044
                   delay to compute the threshold. I still don't know
1045
                   if it is the best guess */
1046
                sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1047
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1048
                    if (diff <= -sync_threshold)
1049
                        delay = 0;
1050
                    else if (diff >= sync_threshold)
1051
                        delay = 2 * delay;
1052
                }
1053
            }
1054

    
1055
            is->frame_timer += delay;
1056
            /* compute the REAL delay (we need to do that to avoid
1057
               long-term errors) */
1058
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1059
            if (actual_delay < 0.010) {
1060
                /* XXX: should skip picture */
1061
                actual_delay = 0.010;
1062
            }
1063
            /* launch timer for next picture */
1064
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1065

    
1066
#if defined(DEBUG_SYNC)
1067
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1068
                   delay, actual_delay, vp->pts, -diff);
1069
#endif
1070

    
1071
            if(is->subtitle_st) {
1072
                if (is->subtitle_stream_changed) {
1073
                    SDL_LockMutex(is->subpq_mutex);
1074

    
1075
                    while (is->subpq_size) {
1076
                        free_subpicture(&is->subpq[is->subpq_rindex]);
1077

    
1078
                        /* update queue size and signal for next picture */
1079
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1080
                            is->subpq_rindex = 0;
1081

    
1082
                        is->subpq_size--;
1083
                    }
1084
                    is->subtitle_stream_changed = 0;
1085

    
1086
                    SDL_CondSignal(is->subpq_cond);
1087
                    SDL_UnlockMutex(is->subpq_mutex);
1088
                } else {
1089
                    if (is->subpq_size > 0) {
1090
                        sp = &is->subpq[is->subpq_rindex];
1091

    
1092
                        if (is->subpq_size > 1)
1093
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1094
                        else
1095
                            sp2 = NULL;
1096

    
1097
                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1098
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1099
                        {
1100
                            free_subpicture(sp);
1101

    
1102
                            /* update queue size and signal for next picture */
1103
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1104
                                is->subpq_rindex = 0;
1105

    
1106
                            SDL_LockMutex(is->subpq_mutex);
1107
                            is->subpq_size--;
1108
                            SDL_CondSignal(is->subpq_cond);
1109
                            SDL_UnlockMutex(is->subpq_mutex);
1110
                        }
1111
                    }
1112
                }
1113
            }
1114

    
1115
            /* display picture */
1116
            video_display(is);
1117

    
1118
            /* update queue size and signal for next picture */
1119
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1120
                is->pictq_rindex = 0;
1121

    
1122
            SDL_LockMutex(is->pictq_mutex);
1123
            is->pictq_size--;
1124
            SDL_CondSignal(is->pictq_cond);
1125
            SDL_UnlockMutex(is->pictq_mutex);
1126
        }
1127
    } else if (is->audio_st) {
1128
        /* draw the next audio frame */
1129

    
1130
        schedule_refresh(is, 40);
1131

    
1132
        /* if only audio stream, then display the audio bars (better
1133
           than nothing, just to test the implementation) */
1134

    
1135
        /* display picture */
1136
        video_display(is);
1137
    } else {
1138
        schedule_refresh(is, 100);
1139
    }
1140
    if (show_status) {
1141
        static int64_t last_time;
1142
        int64_t cur_time;
1143
        int aqsize, vqsize, sqsize;
1144
        double av_diff;
1145

    
1146
        cur_time = av_gettime();
1147
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1148
            aqsize = 0;
1149
            vqsize = 0;
1150
            sqsize = 0;
1151
            if (is->audio_st)
1152
                aqsize = is->audioq.size;
1153
            if (is->video_st)
1154
                vqsize = is->videoq.size;
1155
            if (is->subtitle_st)
1156
                sqsize = is->subtitleq.size;
1157
            av_diff = 0;
1158
            if (is->audio_st && is->video_st)
1159
                av_diff = get_audio_clock(is) - get_video_clock(is);
1160
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
1161
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1162
            fflush(stdout);
1163
            last_time = cur_time;
1164
        }
1165
    }
1166
}
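
/* Worked example (added for clarity): suppose the nominal delay between two
   frames is 40 ms and video is slaved to the audio master.  Then
   sync_threshold = FFMAX(AV_SYNC_THRESHOLD, 0.040) = 0.040: a frame whose pts
   lags the master clock by 80 ms (diff = -0.08) gets delay = 0 and is shown
   immediately, while a frame 80 ms early gets its delay doubled to 80 ms;
   drifts smaller than 40 ms, or larger than AV_NOSYNC_THRESHOLD (10 s), leave
   the delay untouched. */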
1167

    
1168
/* allocate a picture (this must be done in the main thread to avoid
1169
   potential locking problems) */
1170
static void alloc_picture(void *opaque)
1171
{
1172
    VideoState *is = opaque;
1173
    VideoPicture *vp;
1174

    
1175
    vp = &is->pictq[is->pictq_windex];
1176

    
1177
    if (vp->bmp)
1178
        SDL_FreeYUVOverlay(vp->bmp);
1179

    
1180
#if 0
1181
    /* XXX: use generic function */
1182
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
1183
    switch(is->video_st->codec->pix_fmt) {
1184
    case PIX_FMT_YUV420P:
1185
    case PIX_FMT_YUV422P:
1186
    case PIX_FMT_YUV444P:
1187
    case PIX_FMT_YUYV422:
1188
    case PIX_FMT_YUV410P:
1189
    case PIX_FMT_YUV411P:
1190
        is_yuv = 1;
1191
        break;
1192
    default:
1193
        is_yuv = 0;
1194
        break;
1195
    }
1196
#endif
1197
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1198
                                   is->video_st->codec->height,
1199
                                   SDL_YV12_OVERLAY,
1200
                                   screen);
1201
    vp->width = is->video_st->codec->width;
1202
    vp->height = is->video_st->codec->height;
1203

    
1204
    SDL_LockMutex(is->pictq_mutex);
1205
    vp->allocated = 1;
1206
    SDL_CondSignal(is->pictq_cond);
1207
    SDL_UnlockMutex(is->pictq_mutex);
1208
}
1209

    
1210
/**
1211
 *
1212
 * @param pts the dts of the packet / the pts of the frame, guessed if not known
1213
 */
1214
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1215
{
1216
    VideoPicture *vp;
1217
    int dst_pix_fmt;
1218
    AVPicture pict;
1219
    static struct SwsContext *img_convert_ctx;
1220

    
1221
    /* wait until we have space to put a new picture */
1222
    SDL_LockMutex(is->pictq_mutex);
1223
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1224
           !is->videoq.abort_request) {
1225
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1226
    }
1227
    SDL_UnlockMutex(is->pictq_mutex);
1228

    
1229
    if (is->videoq.abort_request)
1230
        return -1;
1231

    
1232
    vp = &is->pictq[is->pictq_windex];
1233

    
1234
    /* alloc or resize hardware picture buffer */
1235
    if (!vp->bmp ||
1236
        vp->width != is->video_st->codec->width ||
1237
        vp->height != is->video_st->codec->height) {
1238
        SDL_Event event;
1239

    
1240
        vp->allocated = 0;
1241

    
1242
        /* the allocation must be done in the main thread to avoid
1243
           locking problems */
1244
        event.type = FF_ALLOC_EVENT;
1245
        event.user.data1 = is;
1246
        SDL_PushEvent(&event);
1247

    
1248
        /* wait until the picture is allocated */
1249
        SDL_LockMutex(is->pictq_mutex);
1250
        while (!vp->allocated && !is->videoq.abort_request) {
1251
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1252
        }
1253
        SDL_UnlockMutex(is->pictq_mutex);
1254

    
1255
        if (is->videoq.abort_request)
1256
            return -1;
1257
    }
1258

    
1259
    /* if the frame is not skipped, then display it */
1260
    if (vp->bmp) {
1261
        /* get a pointer on the bitmap */
1262
        SDL_LockYUVOverlay (vp->bmp);
1263

    
1264
        dst_pix_fmt = PIX_FMT_YUV420P;
1265
        pict.data[0] = vp->bmp->pixels[0];
1266
        pict.data[1] = vp->bmp->pixels[2];
1267
        pict.data[2] = vp->bmp->pixels[1];
1268

    
1269
        pict.linesize[0] = vp->bmp->pitches[0];
1270
        pict.linesize[1] = vp->bmp->pitches[2];
1271
        pict.linesize[2] = vp->bmp->pitches[1];
1272
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1273
        img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1274
            is->video_st->codec->width, is->video_st->codec->height,
1275
            is->video_st->codec->pix_fmt,
1276
            is->video_st->codec->width, is->video_st->codec->height,
1277
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1278
        if (img_convert_ctx == NULL) {
1279
            fprintf(stderr, "Cannot initialize the conversion context\n");
1280
            exit(1);
1281
        }
1282
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1283
                  0, is->video_st->codec->height, pict.data, pict.linesize);
1284
        /* update the bitmap content */
1285
        SDL_UnlockYUVOverlay(vp->bmp);
1286

    
1287
        vp->pts = pts;
1288

    
1289
        /* now we can update the picture count */
1290
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1291
            is->pictq_windex = 0;
1292
        SDL_LockMutex(is->pictq_mutex);
1293
        is->pictq_size++;
1294
        SDL_UnlockMutex(is->pictq_mutex);
1295
    }
1296
    return 0;
1297
}
1298

    
1299
/**
1300
 * compute the exact PTS for the picture if it is omitted in the stream
1301
 * @param pts1 the dts of the pkt / pts of the frame
1302
 */
1303
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1304
{
1305
    double frame_delay, pts;
1306

    
1307
    pts = pts1;
1308

    
1309
    if (pts != 0) {
1310
        /* update video clock with pts, if present */
1311
        is->video_clock = pts;
1312
    } else {
1313
        pts = is->video_clock;
1314
    }
1315
    /* update video clock for next frame */
1316
    frame_delay = av_q2d(is->video_st->codec->time_base);
1317
    /* for MPEG2, the frame can be repeated, so we update the
1318
       clock accordingly */
1319
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1320
    is->video_clock += frame_delay;
1321

    
1322
#if defined(DEBUG_SYNC) && 0
1323
    {
1324
        int ftype;
1325
        if (src_frame->pict_type == FF_B_TYPE)
1326
            ftype = 'B';
1327
        else if (src_frame->pict_type == FF_I_TYPE)
1328
            ftype = 'I';
1329
        else
1330
            ftype = 'P';
1331
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1332
               ftype, pts, pts1);
1333
    }
1334
#endif
1335
    return queue_picture(is, src_frame, pts);
1336
}
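
/* Worked example (added for clarity): for a 25 fps stream the codec time_base
   gives frame_delay = 0.040 s; a frame with repeat_pict = 1 (e.g. MPEG-2 3:2
   pulldown) advances video_clock by 0.040 + 1 * 0.020 = 0.060 s, i.e. the
   frame is counted as one and a half frame periods. */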
1337

    
1338
static int video_thread(void *arg)
1339
{
1340
    VideoState *is = arg;
1341
    AVPacket pkt1, *pkt = &pkt1;
1342
    int len1, got_picture;
1343
    AVFrame *frame= avcodec_alloc_frame();
1344
    double pts;
1345

    
1346
    for(;;) {
1347
        while (is->paused && !is->videoq.abort_request) {
1348
            SDL_Delay(10);
1349
        }
1350
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1351
            break;
1352

    
1353
        if(pkt->data == flush_pkt.data){
1354
            avcodec_flush_buffers(is->video_st->codec);
1355
            continue;
1356
        }
1357

    
1358
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1359
           this packet, if any */
1360
        is->video_st->codec->reordered_opaque= pkt->pts;
1361
        len1 = avcodec_decode_video(is->video_st->codec,
1362
                                    frame, &got_picture,
1363
                                    pkt->data, pkt->size);
1364

    
1365
        if(   (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1366
           && frame->reordered_opaque != AV_NOPTS_VALUE)
1367
            pts= frame->reordered_opaque;
1368
        else if(pkt->dts != AV_NOPTS_VALUE)
1369
            pts= pkt->dts;
1370
        else
1371
            pts= 0;
1372
        pts *= av_q2d(is->video_st->time_base);
1373

    
1374
//            if (len1 < 0)
1375
//                break;
1376
        if (got_picture) {
1377
            if (output_picture2(is, frame, pts) < 0)
1378
                goto the_end;
1379
        }
1380
        av_free_packet(pkt);
1381
        if (step)
1382
            if (cur_stream)
1383
                stream_pause(cur_stream);
1384
    }
1385
 the_end:
1386
    av_free(frame);
1387
    return 0;
1388
}
1389

    
1390
static int subtitle_thread(void *arg)
1391
{
1392
    VideoState *is = arg;
1393
    SubPicture *sp;
1394
    AVPacket pkt1, *pkt = &pkt1;
1395
    int len1, got_subtitle;
1396
    double pts;
1397
    int i, j;
1398
    int r, g, b, y, u, v, a;
1399

    
1400
    for(;;) {
1401
        while (is->paused && !is->subtitleq.abort_request) {
1402
            SDL_Delay(10);
1403
        }
1404
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1405
            break;
1406

    
1407
        if(pkt->data == flush_pkt.data){
1408
            avcodec_flush_buffers(is->subtitle_st->codec);
1409
            continue;
1410
        }
1411
        SDL_LockMutex(is->subpq_mutex);
1412
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1413
               !is->subtitleq.abort_request) {
1414
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1415
        }
1416
        SDL_UnlockMutex(is->subpq_mutex);
1417

    
1418
        if (is->subtitleq.abort_request)
1419
            goto the_end;
1420

    
1421
        sp = &is->subpq[is->subpq_windex];
1422

    
1423
        /* NOTE: ipts is the PTS of the _first_ picture beginning in
1424
           this packet, if any */
1425
        pts = 0;
1426
        if (pkt->pts != AV_NOPTS_VALUE)
1427
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1428

    
1429
        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1430
                                    &sp->sub, &got_subtitle,
1431
                                    pkt->data, pkt->size);
1432
//            if (len1 < 0)
1433
//                break;
1434
        if (got_subtitle && sp->sub.format == 0) {
1435
            sp->pts = pts;
1436

    
1437
            for (i = 0; i < sp->sub.num_rects; i++)
1438
            {
1439
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1440
                {
1441
                    RGBA_IN(r, g, b, a, sp->sub.rects[i]->rgba_palette + j);
1442
                    y = RGB_TO_Y_CCIR(r, g, b);
1443
                    u = RGB_TO_U_CCIR(r, g, b, 0);
1444
                    v = RGB_TO_V_CCIR(r, g, b, 0);
1445
                    YUVA_OUT(sp->sub.rects[i]->rgba_palette + j, y, u, v, a);
1446
                }
1447
            }
1448

    
1449
            /* now we can update the picture count */
1450
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1451
                is->subpq_windex = 0;
1452
            SDL_LockMutex(is->subpq_mutex);
1453
            is->subpq_size++;
1454
            SDL_UnlockMutex(is->subpq_mutex);
1455
        }
1456
        av_free_packet(pkt);
1457
//        if (step)
1458
//            if (cur_stream)
1459
//                stream_pause(cur_stream);
1460
    }
1461
 the_end:
1462
    return 0;
1463
}
1464

    
1465
/* copy samples into the circular buffer used by the audio waveform display */
1466
static void update_sample_display(VideoState *is, short *samples, int samples_size)
1467
{
1468
    int size, len, channels;
1469

    
1470
    channels = is->audio_st->codec->channels;
1471

    
1472
    size = samples_size / sizeof(short);
1473
    while (size > 0) {
1474
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1475
        if (len > size)
1476
            len = size;
1477
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1478
        samples += len;
1479
        is->sample_array_index += len;
1480
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1481
            is->sample_array_index = 0;
1482
        size -= len;
1483
    }
1484
}
1485

    
1486
/* return the new audio buffer size (samples can be added or deleted
1487
   to get better sync when the video or external clock is the master) */
1488
static int synchronize_audio(VideoState *is, short *samples,
1489
                             int samples_size1, double pts)
1490
{
1491
    int n, samples_size;
1492
    double ref_clock;
1493

    
1494
    n = 2 * is->audio_st->codec->channels;
1495
    samples_size = samples_size1;
1496

    
1497
    /* if not master, then we try to remove or add samples to correct the clock */
1498
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1499
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1500
        double diff, avg_diff;
1501
        int wanted_size, min_size, max_size, nb_samples;
1502

    
1503
        ref_clock = get_master_clock(is);
1504
        diff = get_audio_clock(is) - ref_clock;
1505

    
1506
        if (diff < AV_NOSYNC_THRESHOLD) {
1507
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1508
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1509
                /* not enough measures to have a correct estimate */
1510
                is->audio_diff_avg_count++;
1511
            } else {
1512
                /* estimate the A-V difference */
1513
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1514

    
1515
                if (fabs(avg_diff) >= is->audio_diff_threshold) {
1516
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1517
                    nb_samples = samples_size / n;
1518

    
1519
                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1520
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1521
                    if (wanted_size < min_size)
1522
                        wanted_size = min_size;
1523
                    else if (wanted_size > max_size)
1524
                        wanted_size = max_size;
1525

    
1526
                    /* add or remove samples to correct the sync */
1527
                    if (wanted_size < samples_size) {
1528
                        /* remove samples */
1529
                        samples_size = wanted_size;
1530
                    } else if (wanted_size > samples_size) {
1531
                        uint8_t *samples_end, *q;
1532
                        int nb;
1533

    
1534
                        /* add samples */
1535
                        nb = (wanted_size - samples_size);
1536
                        samples_end = (uint8_t *)samples + samples_size - n;
1537
                        q = samples_end + n;
1538
                        while (nb > 0) {
1539
                            memcpy(q, samples_end, n);
1540
                            q += n;
1541
                            nb -= n;
1542
                        }
1543
                        samples_size = wanted_size;
1544
                    }
1545
                }
1546
#if 0
1547
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1548
                       diff, avg_diff, samples_size - samples_size1,
1549
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
1550
#endif
1551
            }
1552
        } else {
1553
            /* difference is too big: probably initial PTS errors, so
1554
               reset A-V filter */
1555
            is->audio_diff_avg_count = 0;
1556
            is->audio_diff_cum = 0;
1557
        }
1558
    }
1559

    
1560
    return samples_size;
1561
}
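
/* Worked example (added for clarity): with 44100 Hz stereo S16 audio, n = 4
   bytes per sample frame.  If the smoothed A-V difference settles at +0.05 s
   (audio ahead of the master), a 4096-byte buffer gets
   wanted_size = 4096 + (int)(0.05 * 44100) * 4 = 12916 bytes, which the
   SAMPLE_CORRECTION_PERCENT_MAX clamp reduces to max_size =
   (1024 * 110 / 100) * 4 = 4504 bytes; the extra 408 bytes are then filled by
   repeating the last sample frame, stretching the buffer by roughly 10%. */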
1562

    
1563
/* decode one audio frame and return its uncompressed size */
1564
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio2(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assume that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* update the audio clock with the pts if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
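/* SDL calls this from its own audio thread whenever the device needs more
   data: decoded frames are fetched with audio_decode_frame(), resized by
   synchronize_audio(), and copied into the SDL stream buffer; silence is
   written if decoding fails. */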
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

/* open a given stream. Return 0 if OK */
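/* The decoder for the selected stream is configured with the user options
   and opened; audio streams additionally open the SDL audio device and
   start playback, while video and subtitle streams spawn their decoding
   threads. */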
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;

    set_context_opts(enc, avctx_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
    enc->thread_count= thread_count;
    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

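/* undo stream_component_open(): abort the packet queue, wake up and join
   the decoding thread (or close the SDL audio device), then close the
   codec with avcodec_close() */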
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    if (s->track != 0)
        fprintf(stderr, "Track: %d\n", s->track);
    if (s->title[0] != '\0')
        fprintf(stderr, "Title: %s\n", s->title);
    if (s->author[0] != '\0')
        fprintf(stderr, "Author: %s\n", s->author);
    if (s->copyright[0] != '\0')
        fprintf(stderr, "Copyright: %s\n", s->copyright);
    if (s->comment[0] != '\0')
        fprintf(stderr, "Comment: %s\n", s->comment);
    if (s->album[0] != '\0')
        fprintf(stderr, "Album: %s\n", s->album);
    if (s->year != 0)
        fprintf(stderr, "Year: %d\n", s->year);
    if (s->genre[0] != '\0')
        fprintf(stderr, "Genre: %s\n", s->genre);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

/* this thread gets the stream from the disk or the network */
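/* It opens the input, picks the audio/video streams to play, then loops:
   packets read with av_read_frame() are dispatched to the per-stream
   packet queues, and pause and seek requests from the user are serviced
   between reads. */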
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
        if (is->paused &&
                (!strcmp(ic->iformat->name, "rtsp") ||
                 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
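        /* a seek was requested from the event loop: perform it on the
           demuxer, then flush the packet queues and push a flush packet
           so that each decoder discards its buffered state */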
        if (is->seek_req) {
            int stream_index= -1;
            int64_t seek_target= is->seek_pos;

            if     (is->   video_stream >= 0) stream_index= is->   video_stream;
            else if(is->   audio_stream >= 0) stream_index= is->   audio_stream;
            else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;

            if(stream_index>=0){
                seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
            }

            ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

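/* allocate and start a VideoState for the given file: create the picture
   and subtitle queue synchronization primitives, schedule the first
   display refresh and launch the demuxing (parse) thread; returns NULL
   on failure */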
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

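/* tear down a VideoState: ask the parse thread to abort and join it, then
   free the SDL YUV overlays of the picture queue and destroy the mutexes
   and condition variables */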
static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

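/* switch to the next stream of the given codec type, wrapping around;
   an audio stream is only selected if it has a valid sample rate and
   channel count, and the subtitle stream may be switched off entirely */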
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
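/* Keyboard shortcuts, mouse seeking, window resizing and the custom
   FF_ALLOC/FF_REFRESH/FF_QUIT events are all dispatched from this loop;
   the loop never returns, quitting goes through do_exit(), which calls
   exit(). */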
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = avcodec_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

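/* command line options, interpreted by the shared cmdutils option parser */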
static const OptionDef options[] = {
    { "h", OPT_EXIT, {(void*)show_help}, "show help" },
    { "version", OPT_EXIT, {(void*)show_version}, "show version" },
    { "L", OPT_EXIT, {(void*)show_license}, "show license" },
    { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_help(void)
{
    printf("usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
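/* Initialization order: register all of FFmpeg, parse the command line,
   initialize the SDL video/audio/timer subsystems, open the stream and
   hand control to the SDL event loop, which never returns. */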
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avctx_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = av_alloc_format_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        fprintf(stderr, "An input file must be specified\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}