ffmpeg / ffplay.c @ 12eeda34
/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"
#include "libavcodec/dsputil.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
    SDL_TimerID timer_id;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext rdft;
    int rdft_bits;
    int xpos;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_audio_stream= 0;
static int wanted_video_stream= 0;
static int wanted_subtitle_stream= -1;
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

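/* A PacketQueue is a simple mutex/condvar-protected list of AVPackets: a
   producer thread appends packets with packet_queue_put() while each decoder
   thread blocks in packet_queue_get() until a packet arrives or abort_request
   is set.  The special flush_pkt is pushed on seeks so the decoder threads
   know to flush their codec buffers. */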
/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;


    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)

        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}


#define BPP 1

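/* Blend a palettized subtitle rectangle (PAL8 data whose palette has already
   been converted to YUVA in subtitle_thread) onto the YUV420P destination
   picture.  Luma is blended per pixel, chroma at half resolution, which is why
   rows and columns are processed two at a time with special cases for odd
   x/y offsets and odd width/height. */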
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }


        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

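/* Visualize the audio stream when no video is shown: with show_audio == 1 an
   oscilloscope-style waveform is drawn, one band per channel; otherwise an
   RDFT of the most recent samples is drawn as a spectrogram column that
   scrolls across the window at x = xpos.  i_start is chosen so the display is
   centred on the samples currently being played, compensating for the data
   still sitting in the audio output buffer. */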
static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
        h= INT_MIN;
        for(i=0; i<1000; i+=channels){
            int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
            int a= s->sample_array[idx];
            int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
            int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
            int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
            int score= a-d;
            if(h<score && (b^c)<0){
                h= score;
                i_start= idx;
            }
        }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        int rdft_bits, nb_freq;
        nb_display_channels= FFMIN(nb_display_channels, 2);
        for(rdft_bits=1; (1<<rdft_bits)<=s->height; rdft_bits++)
            ;
        if(rdft_bits != s->rdft_bits){
            ff_rdft_end(&s->rdft);
            ff_rdft_init(&s->rdft, rdft_bits, RDFT);
            s->rdft_bits= rdft_bits;
        }
        nb_freq= 1<<(rdft_bits-1);
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                ff_rdft_calc(&s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but it's more than fast enough
            for(y=0; y<nb_freq; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

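/* Display refreshes are driven by SDL timers: schedule_refresh() arms a
   one-shot timer whose callback pushes an FF_REFRESH_EVENT carrying the
   VideoState pointer; the event is later serviced by video_refresh_timer()
   (dispatched from the main event loop, which lies outside this excerpt). */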
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

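/* Clock handling: depending on av_sync_type, one of three clocks acts as the
   master that everything else is slaved to.  The audio clock is the pts of
   the last decoded audio data minus whatever is still queued in the hardware
   buffer; the video clock is reconstructed from the drift between the last
   displayed pts and the wall-clock time at which it was stored; the external
   clock simply advances with av_gettime(). */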
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

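/* When resuming from pause, frame_timer is advanced by the time spent paused
   (recovered from the stored pts drift), so the next frame is not considered
   late, and the video clock drift is re-anchored to the current wall-clock
   time. */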
/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}

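/* Compute how long to wait before showing the frame with the given pts.  The
   nominal delay is the pts difference to the previous frame; when video is
   not the master clock the delay is dropped to zero or doubled to drift back
   towards the master, as long as the A-V difference stays below
   AV_NOSYNC_THRESHOLD.  frame_timer accumulates the nominal delays, and the
   returned value is the remaining wall-clock time until that target, clamped
   to at least 10 ms. */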
static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            fprintf(stderr, "Internal error detected in the SDL timer\n");
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
            is->video_current_pos = vp->pos;

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            vp->timer_id= 0;
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to do that in main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        //We must schedule in a mutex as we must store the timer id before the timer dies, or we might end up freeing an already freed id
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts, pos);
}

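/* Video decoding thread: pulls packets from videoq, decodes them, chooses a
   pts for each picture and hands it to queue_picture() via output_picture2().
   The pts source is picked heuristically: the decoder-reordered pts (carried
   through reordered_opaque) is preferred when decoder_reorder_pts is enabled
   or when the pts counter looks less faulty than the dts counter, otherwise
   the packet dts is used.  On a flush packet the codec buffers are flushed
   and pending refresh timers are cancelled so stale pictures are not shown. */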
static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture, i;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                if(is->pictq[i].timer_id){
                    SDL_RemoveTimer(is->pictq[i].timer_id);
                    is->pictq[i].timer_id=0;
                    schedule_refresh(is, 1);
                }
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;

            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts, pkt->pos) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

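/* When audio is not the master clock, synchronize_audio() nudges it back in
   line by resizing the decoded buffer: the A-V difference is low-pass
   filtered with an exponential average (audio_diff_cum accumulates diff with
   weight audio_diff_avg_coef, and the estimate is audio_diff_cum * (1 - coef));
   once the average exceeds audio_diff_threshold, samples are dropped or the
   last sample is repeated, with the change clamped to
   SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer. */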
/* return the new audio buffer size (samples can be added or deleted
1569
   to get better sync if video or external master clock) */
1570
static int synchronize_audio(VideoState *is, short *samples,
1571
                             int samples_size1, double pts)
1572
{
1573
    int n, samples_size;
1574
    double ref_clock;
1575

    
1576
    n = 2 * is->audio_st->codec->channels;
1577
    samples_size = samples_size1;
1578

    
1579
    /* if not master, then we try to remove or add samples to correct the clock */
1580
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1581
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1582
        double diff, avg_diff;
1583
        int wanted_size, min_size, max_size, nb_samples;
1584

    
1585
        ref_clock = get_master_clock(is);
1586
        diff = get_audio_clock(is) - ref_clock;
1587

    
1588
        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

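                        /* pad the buffer by repeating the last sample frame
                           (n bytes: one sample per channel) until wanted_size
                           bytes are available */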
                        /* add samples */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* difference is too big: may be initial PTS errors, so
               reset the A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}

/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

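            /* convert interleaved samples to signed 16-bit: a single "plane"
               is used, with the input stride set to the source bytes per
               sample and the output stride to 2 bytes (S16) */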
            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2,
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

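            /* the audio clock tracks the end of the decoded data: it is
               advanced below by data_size / (n * sample_rate) seconds,
               where n is the output frame size in bytes (2 * channels) */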
            /* the frame pts is the audio clock before it is advanced */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock from the packet pts when available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the amount of decoded audio still buffered for output, in bytes.
   With SDL we cannot get precise hardware buffer fullness information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

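    /* SDL pulls 'len' bytes from us: refill from the decoder whenever the
       local buffer is exhausted, and output silence on decode errors so the
       audio device always stays fed */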
    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
           audio_size = audio_decode_frame(is, &pts);
           if (audio_size < 0) {
                /* if error, just output silence */
               is->audio_buf = is->audio_buf1;
               is->audio_buf_size = 1024;
               memset(is->audio_buf, 0, is->audio_buf_size);
           } else {
               if (is->show_audio)
                   update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
               audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                              pts);
               is->audio_buf_size = audio_size;
           }
           is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* ask the decoder to output at most two channels, matching the SDL
       audio output opened below */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        if (enc->channels > 0) {
            enc->request_channels = FFMIN(2, enc->channels);
        } else {
            enc->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_recognition= error_recognition;
    enc->error_concealment= error_concealment;
    avcodec_thread_init(enc, thread_count);

    set_context_opts(enc, avcodec_opts[enc->codec_type], 0);

    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness measure,
           we correct audio sync only if the error is larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, subtitle_index;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    video_index = -1;
    audio_index = -1;
    subtitle_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

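    /* pick the requested stream of each type: each wanted_*_stream counter
       is decremented once per stream of that type, so the index stops being
       updated after the stream selected with -ast/-vst/-sst */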
    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (wanted_audio_stream-- >= 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (wanted_video_stream-- >= 0 && !video_disable)
                video_index = i;
            break;
        case CODEC_TYPE_SUBTITLE:
            if (wanted_subtitle_stream-- >= 0 && !video_disable)
                subtitle_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        /* add the refresh timer to draw the picture */
        schedule_refresh(is, 40);

        if (!display_disable)
            is->show_audio = 1;
    }

    if (subtitle_index >= 0) {
        stream_component_open(is, subtitle_index);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
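        /* a pending seek request: ask libavformat for a position within
           [seek_min, seek_max]; on success every packet queue is flushed
           and a flush_pkt sentinel is queued so each decoding thread calls
           avcodec_flush_buffers() before decoding post-seek data */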
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding being not done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

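        /* stop reading when more than MAX_QUEUE_SIZE bytes are already
           queued, or when every open stream has a comfortable backlog
           (MIN_AUDIOQ_SIZE bytes of audio, MIN_FRAMES packets of
           video/subtitles) */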
        /* if the queues are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
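        /* at end of file, keep feeding an empty packet to the video queue so
           decoders with frame delay (e.g. B-frame reordering) flush out their
           remaining pictures */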
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
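    /* walk the stream list, wrapping around, until another usable stream of
       the same type is found; for subtitles the search may also stop at -1,
       which turns the subtitle display off */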
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
        fill_rectangle(screen,
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
                    bgcolor);
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
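                    /* when seeking by bytes, turn the time increment
                       (seconds) into a byte offset using the container bit
                       rate when known, or a rough fallback of 180000 bytes
                       per second otherwise */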
                    if (seek_by_bytes) {
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_subtitle_stream}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
2627

    
2628
static void show_usage(void)
2629
{
2630
    printf("Simple media player\n");
2631
    printf("usage: ffplay [options] input_file\n");
2632
    printf("\n");
2633
}
2634

    
2635
static void show_help(void)
2636
{
2637
    show_usage();
2638
    show_help_options(options, "Main options:\n",
2639
                      OPT_EXPERT, 0);
2640
    show_help_options(options, "\nAdvanced options:\n",
2641
                      OPT_EXPERT, OPT_EXPERT);
2642
    printf("\nWhile playing:\n"
2643
           "q, ESC              quit\n"
2644
           "f                   toggle full screen\n"
2645
           "p, SPC              pause\n"
2646
           "a                   cycle audio channel\n"
2647
           "v                   cycle video channel\n"
2648
           "t                   cycle subtitle channel\n"
2649
           "w                   show audio waves\n"
2650
           "left/right          seek backward/forward 10 seconds\n"
2651
           "down/up             seek backward/forward 1 minute\n"
2652
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
2653
           );
2654
}
2655

    
2656
static void opt_input_file(const char *filename)
2657
{
2658
    if (!strcmp(filename, "-"))
2659
        filename = "pipe:";
2660
    input_filename = filename;
2661
}

/* entry point */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

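    /* flush_pkt is a sentinel: the queues compare a packet's data pointer
       against flush_pkt.data to detect it and flush their codec buffers
       after a seek */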
    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}