/*
 * FFplay : Simple Media Player based on the FFmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#include <math.h>
#include <limits.h>
#include "libavutil/avstring.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"
#include "libswscale/swscale.h"
#include "libavcodec/audioconvert.h"
#include "libavcodec/colorspace.h"
#include "libavcodec/opt.h"
#include "libavcodec/dsputil.h"

#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#undef exit
#undef printf
#undef fprintf

const char program_name[] = "FFplay";
const int program_birth_year = 2003;

//#define DEBUG_SYNC

#define MAX_QUEUE_SIZE (15 * 1024 * 1024)
#define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
#define MIN_FRAMES 5

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no AV sync correction is done if below the AV sync threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no AV correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    int64_t pos;                                 ///<byte position in file
    SDL_Overlay *bmp;
    int width, height; /* source height & width */
    int allocated;
    SDL_TimerID timer_id;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    int64_t seek_rel;
    int read_pause_return;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
    uint8_t *audio_buf;
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt_temp;
    AVPacket audio_pkt;
    enum SampleFormat audio_src_fmt;
    AVAudioConvert *reformat_ctx;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;
    RDFTContext rdft;
    int rdft_bits;
    int xpos;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
    int64_t video_current_pos;                   ///<current displayed file pos
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;
    struct SwsContext *img_convert_ctx;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;

    int64_t faulty_pts;
    int64_t faulty_dts;
    int64_t last_dts_for_fault_detection;
    int64_t last_pts_for_fault_detection;

} VideoState;

static void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int frame_width = 0;
static int frame_height = 0;
static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
static int audio_disable;
static int video_disable;
static int wanted_stream[CODEC_TYPE_NB]={
    [CODEC_TYPE_AUDIO]=-1,
    [CODEC_TYPE_VIDEO]=-1,
    [CODEC_TYPE_SUBTITLE]=-1,
};
static int seek_by_bytes=-1;
static int display_disable;
static int show_status = 1;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_recognition = FF_ER_CAREFUL;
static int error_concealment = 3;
static int decoder_reorder_pts= -1;
static int autoexit;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

static AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

static SDL_Surface *screen;

static int packet_queue_put(PacketQueue *q, AVPacket *pkt);

/* packet queue handling */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
    packet_queue_put(q, &flush_pkt);
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size + sizeof(*pkt1);
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet. */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size + sizeof(*pkt1);
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}
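
/* Illustrative sketch of how the queue above is meant to be consumed; the
   helper name consume_packets_example is hypothetical and the block is kept
   disabled, the real decoder threads further down follow the same pattern.
   A blocking packet_queue_get() returns < 0 once packet_queue_abort() has
   been called, and the global flush_pkt marks a seek point after which the
   codec state should be flushed. */
#if 0
static void consume_packets_example(VideoState *is, PacketQueue *q, AVCodecContext *avctx)
{
    AVPacket pkt;

    for (;;) {
        if (packet_queue_get(q, &pkt, 1) < 0)
            break;                        /* queue aborted: stop the thread */
        if (pkt.data == flush_pkt.data) {
            avcodec_flush_buffers(avctx); /* drop codec state after a seek */
            continue;
        }
        /* ... decode pkt here ... */
        av_free_packet(&pkt);
    }
}
#endif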

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

#define BPP 1

static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;
    int dstx, dsty, dstw, dsth;

    dstw = av_clip(rect->w, 0, imgw);
    dsth = av_clip(rect->h, 0, imgh);
    dstx = av_clip(rect->x, 0, imgw - dstw);
    dsty = av_clip(rect->y, 0, imgh - dsth);
    lum = dst->data[0] + dsty * dst->linesize[0];
    cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
    cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];

    width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
    skip2 = dstx >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->pict.linesize[0];
    p = rect->pict.data[0];
    pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */

    if (dsty & 1) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            p++;
            lum++;
        }
        p += wrap3 - dstw * BPP;
        lum += wrap - dstw - dstx;
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - dstw * BPP);
        lum += wrap + (wrap - dstw - dstx);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += dstx;
        cb += skip2;
        cr += skip2;

        if (dstx & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_freep(&sp->sub.rects[i]->pict.data[0]);
        av_freep(&sp->sub.rects[i]->pict.data[1]);
        av_freep(&sp->sub.rects[i]);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
        else if (is->video_st->codec->sample_aspect_ratio.num)
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
        else
            aspect_ratio = 0;
        if (aspect_ratio <= 0.0)
            aspect_ratio = 1.0;
        aspect_ratio *= (float)is->video_st->codec->width / is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           MPEG format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, sp->sub.rects[i],
                                      vp->bmp->w, vp->bmp->h);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & ~1;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & ~1;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop  + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}
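
/* For example, compute_mod(-3, SAMPLE_ARRAY_SIZE) yields SAMPLE_ARRAY_SIZE - 3:
   the result is always the non-negative remainder, which is what the circular
   sample_array indexing below relies on. */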

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int16_t time_diff;
    int rdft_bits, nb_freq;

    for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
        ;
    nb_freq= 1<<(rdft_bits-1);

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= data_used / 2;
        if (delay < data_used)
            delay = data_used;

        i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        if(s->show_audio==1){
            h= INT_MIN;
            for(i=0; i<1000; i+=channels){
                int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
                int a= s->sample_array[idx];
                int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
                int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
                int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
                int score= a-d;
                if(h<score && (b^c)<0){
                    h= score;
                    i_start= idx;
                }
            }
        }

        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    if(s->show_audio==1){
        fill_rectangle(screen,
                       s->xleft, s->ytop, s->width, s->height,
                       bgcolor);

        fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

        /* total height for one channel */
        h = s->height / nb_display_channels;
        /* graph height / 2 */
        h2 = (h * 9) / 20;
        for(ch = 0;ch < nb_display_channels; ch++) {
            i = i_start + ch;
            y1 = s->ytop + ch * h + (h / 2); /* position of center line */
            for(x = 0; x < s->width; x++) {
                y = (s->sample_array[i] * h2) >> 15;
                if (y < 0) {
                    y = -y;
                    ys = y1 - y;
                } else {
                    ys = y1;
                }
                fill_rectangle(screen,
                               s->xleft + x, ys, 1, y,
                               fgcolor);
                i += channels;
                if (i >= SAMPLE_ARRAY_SIZE)
                    i -= SAMPLE_ARRAY_SIZE;
            }
        }

        fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

        for(ch = 1;ch < nb_display_channels; ch++) {
            y = s->ytop + ch * h;
            fill_rectangle(screen,
                           s->xleft, y, s->width, 1,
                           fgcolor);
        }
        SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
    }else{
        nb_display_channels= FFMIN(nb_display_channels, 2);
        if(rdft_bits != s->rdft_bits){
            ff_rdft_end(&s->rdft);
            ff_rdft_init(&s->rdft, rdft_bits, RDFT);
            s->rdft_bits= rdft_bits;
        }
        {
            FFTSample data[2][2*nb_freq];
            for(ch = 0;ch < nb_display_channels; ch++) {
                i = i_start + ch;
                for(x = 0; x < 2*nb_freq; x++) {
                    double w= (x-nb_freq)*(1.0/nb_freq);
                    data[ch][x]= s->sample_array[i]*(1.0-w*w);
                    i += channels;
                    if (i >= SAMPLE_ARRAY_SIZE)
                        i -= SAMPLE_ARRAY_SIZE;
                }
                ff_rdft_calc(&s->rdft, data[ch]);
            }
            //least efficient way to do this, we should of course directly access it but it's more than fast enough
            for(y=0; y<s->height; y++){
                double w= 1/sqrt(nb_freq);
                int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
                int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
                a= FFMIN(a,255);
                b= FFMIN(b,255);
                fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);

                fill_rectangle(screen,
                            s->xpos, s->height-y, 1, 1,
                            fgcolor);
            }
        }
        SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
        s->xpos++;
        if(s->xpos >= s->width)
            s->xpos= s->xleft;
    }
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if(is_full_screen) flags |= SDL_FULLSCREEN;
    else               flags |= SDL_RESIZABLE;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
    } else if(!is_full_screen && screen_width){
        w = screen_width;
        h = screen_height;
    }else if (is->video_st && is->video_st->codec->width){
        w = is->video_st->codec->width;
        h = is->video_st->codec->height;
    } else {
        w = 640;
        h = 480;
    }
#ifndef __APPLE__
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static SDL_TimerID schedule_refresh(VideoState *is, int delay)
{
    if(!delay) delay=1; //SDL seems to be buggy when the delay is 0
    return SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}
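
/* Worked example (illustrative, assuming the 16-bit samples implied by the
   factor 2 above): for 44100 Hz stereo audio, bytes_per_sec = 44100 * 2 * 2
   = 176400. If audio_write_get_buf_size() reports 8192 bytes still queued,
   the clock is pulled back by 8192 / 176400 ~= 0.046 s, i.e. the time those
   buffered samples will still take to reach the speakers. */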

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    if (is->paused) {
        return is->video_current_pts;
    } else {
        return is->video_current_pts_drift + av_gettime() / 1000000.0;
    }
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_rel = rel;
        is->seek_flags &= ~AVSEEK_FLAG_BYTE;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    if (is->paused) {
        is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
        if(is->read_pause_return != AVERROR(ENOSYS)){
            is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
        }
        is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
    }
    is->paused = !is->paused;
}

static double compute_frame_delay(double frame_current_pts, VideoState *is)
{
    double actual_delay, delay, sync_threshold, diff;

    /* compute nominal delay */
    delay = frame_current_pts - is->frame_last_pts;
    if (delay <= 0 || delay >= 10.0) {
        /* if incorrect delay, use previous one */
        delay = is->frame_last_delay;
    } else {
        is->frame_last_delay = delay;
    }
    is->frame_last_pts = frame_current_pts;

    /* update delay to follow master synchronisation source */
    if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        /* if video is slave, we try to correct big delays by
           duplicating or deleting a frame */
        diff = get_video_clock(is) - get_master_clock(is);

        /* skip or repeat frame. We take into account the
           delay to compute the threshold. I still don't know
           if it is the best guess */
        sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
        if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
            if (diff <= -sync_threshold)
                delay = 0;
            else if (diff >= sync_threshold)
                delay = 2 * delay;
        }
    }

    is->frame_timer += delay;
    /* compute the REAL delay (we need to do that to avoid
       long-term errors) */
    actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    if (actual_delay < 0.010) {
        /* XXX: should skip picture */
        actual_delay = 0.010;
    }

#if defined(DEBUG_SYNC)
    printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
            delay, actual_delay, frame_current_pts, -diff);
#endif

    return actual_delay;
}
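
/* Worked example (illustrative): with 25 fps material the nominal delay is
   0.040 s, so sync_threshold = FFMAX(AV_SYNC_THRESHOLD, 0.040) = 0.040 s.
   If the video clock is 0.05 s behind the master (diff <= -0.040), the delay
   collapses to 0 and the frame is shown as soon as possible; if it is 0.05 s
   ahead (diff >= 0.040), the delay doubles to 0.080 s, effectively holding
   the current frame for one extra frame interval. */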

/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            fprintf(stderr, "Internal error detected in the SDL timer\n");
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
            is->video_current_pos = vp->pos;

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            vp->timer_id= 0;
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 30000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%Ld/%Ld   \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (this needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
{
    VideoPicture *vp;
    int dst_pix_fmt;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        AVPicture pict;

        /* get a pointer on the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        memset(&pict,0,sizeof(AVPicture));
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
        is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
            is->video_st->codec->width, is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width, is->video_st->codec->height,
            dst_pix_fmt, sws_flags, NULL, NULL, NULL);
        if (is->img_convert_ctx == NULL) {
            fprintf(stderr, "Cannot initialize the conversion context\n");
            exit(1);
        }
        sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;
        vp->pos = pos;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        //We must schedule inside the mutex: the timer id has to be stored before the timer fires, or we might end up freeing an already freed id
        vp->timer_id= schedule_refresh(is, (int)(compute_frame_delay(vp->pts, is) * 1000 + 0.5));
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts, pos);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture, i;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);

            SDL_LockMutex(is->pictq_mutex);
            //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
            for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
                if(is->pictq[i].timer_id){
                    SDL_RemoveTimer(is->pictq[i].timer_id);
                    is->pictq[i].timer_id=0;
                    schedule_refresh(is, 1);
                }
            }
            while (is->pictq_size && !is->videoq.abort_request) {
                SDL_CondWait(is->pictq_cond, is->pictq_mutex);
            }
            is->video_current_pos= -1;
            SDL_UnlockMutex(is->pictq_mutex);

            is->last_dts_for_fault_detection=
            is->last_pts_for_fault_detection= INT64_MIN;
            is->frame_last_pts= AV_NOPTS_VALUE;
            is->frame_last_delay = 0;
            is->frame_timer = (double)av_gettime() / 1000000.0;

            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        is->video_st->codec->reordered_opaque= pkt->pts;
        len1 = avcodec_decode_video2(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt);

        if (got_picture) {
            if(pkt->dts != AV_NOPTS_VALUE){
                is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
                is->last_dts_for_fault_detection= pkt->dts;
            }
            if(frame->reordered_opaque != AV_NOPTS_VALUE){
                is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
                is->last_pts_for_fault_detection= frame->reordered_opaque;
            }
        }

        if(   (   decoder_reorder_pts==1
               || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
               || pkt->dts == AV_NOPTS_VALUE)
           && frame->reordered_opaque != AV_NOPTS_VALUE)
            pts= frame->reordered_opaque;
        else if(pkt->dts != AV_NOPTS_VALUE)
            pts= pkt->dts;
        else
            pts= 0;
        pts *= av_q2d(is->video_st->time_base);

//            if (len1 < 0)
//                break;
        if (got_picture) {
            if (output_picture2(is, frame, pts, pkt->pos) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt);
//            if (len1 < 0)
//                break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or the external clock is the master) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples */
                        nb = (samples_size - wanted_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big a difference: may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}
1648

    
1649
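/* The loop below drains one packet at a time from the audio queue and may
   return several times per packet; is->audio_clock is advanced by the
   duration of every chunk returned, so get_audio_clock() always knows how
   far playback has progressed. */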
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, double *pts_ptr)
{
    AVPacket *pkt_temp = &is->audio_pkt_temp;
    AVPacket *pkt = &is->audio_pkt;
    AVCodecContext *dec= is->audio_st->codec;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (pkt_temp->size > 0) {
            data_size = sizeof(is->audio_buf1);
            len1 = avcodec_decode_audio3(dec,
                                        (int16_t *)is->audio_buf1, &data_size,
                                        pkt_temp);
            if (len1 < 0) {
                /* if error, we skip the frame */
                pkt_temp->size = 0;
                break;
            }

            pkt_temp->data += len1;
            pkt_temp->size -= len1;
            if (data_size <= 0)
                continue;

            if (dec->sample_fmt != is->audio_src_fmt) {
                if (is->reformat_ctx)
                    av_audio_convert_free(is->reformat_ctx);
                is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
                                                         dec->sample_fmt, 1, NULL, 0);
                if (!is->reformat_ctx) {
                    fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
                        avcodec_get_sample_fmt_name(dec->sample_fmt),
                        avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
                    break;
                }
                is->audio_src_fmt= dec->sample_fmt;
            }

            if (is->reformat_ctx) {
                const void *ibuf[6]= {is->audio_buf1};
                void *obuf[6]= {is->audio_buf2};
                int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
                int ostride[6]= {2};
                int len= data_size/istride[0];
                if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
                    printf("av_audio_convert() failed\n");
                    break;
                }
                is->audio_buf= is->audio_buf2;
                /* FIXME: existing code assumes that data_size equals framesize*channels*2
                          remove this legacy cruft */
                data_size= len*2;
            }else{
                is->audio_buf= is->audio_buf1;
            }

            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * dec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * dec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(dec);
            continue;
        }

        pkt_temp->data = pkt->data;
        pkt_temp->size = pkt->size;

        /* update the audio clock with the pts if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot have precise information */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_buf_size - is->audio_buf_index;
}


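/* SDL calls this from its own audio thread. Whenever the internal buffer is
   exhausted we decode the next frame, let synchronize_audio() adjust its
   size for A-V sync, and then copy out as many bytes as SDL requested; on a
   decode error a short block of silence is substituted. */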
/* prepare a new audio buffer */
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf = is->audio_buf1;
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

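/* Per-stream setup: the codec context is configured from the command line
   options, the decoder is opened, and depending on the stream type either
   the SDL audio device or a dedicated decoding thread is started. */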
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    avctx = ic->streams[stream_index]->codec;

    /* prepare audio output: request a downmix to at most 2 channels */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        if (avctx->channels > 0) {
            avctx->request_channels = FFMIN(2, avctx->channels);
        } else {
            avctx->request_channels = 2;
        }
    }

    codec = avcodec_find_decoder(avctx->codec_id);
    avctx->debug_mv = debug_mv;
    avctx->debug = debug;
    avctx->workaround_bugs = workaround_bugs;
    avctx->lowres = lowres;
    if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
    avctx->idct_algo= idct;
    if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
    avctx->skip_frame= skip_frame;
    avctx->skip_idct= skip_idct;
    avctx->skip_loop_filter= skip_loop_filter;
    avctx->error_recognition= error_recognition;
    avctx->error_concealment= error_concealment;
    avcodec_thread_init(avctx, thread_count);

    set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);

    if (!codec ||
        avcodec_open(avctx, codec) < 0)
        return -1;

    /* prepare audio output */
    if (avctx->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = avctx->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        wanted_spec.channels = avctx->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
        is->audio_src_fmt= SAMPLE_FMT_S16;
    }

    ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio FIFO fullness,
           we correct audio sync only if larger than this threshold */
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

//        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *avctx;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    avctx = ic->streams[stream_index]->codec;

    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        if (is->reformat_ctx)
            av_audio_convert_free(is->reformat_ctx);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we unblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we unblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    ic->streams[stream_index]->discard = AVDISCARD_ALL;
    avcodec_close(avctx);
    switch(avctx->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

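/* Demuxing loop: packets obtained with av_read_frame() are dispatched to
   the audio, video and subtitle packet queues. Seeking, pausing and the
   read-ahead limits (MAX_QUEUE_SIZE, MIN_AUDIOQ_SIZE, MIN_FRAMES) are all
   handled here so that only this thread ever touches the demuxer. */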
/* this thread gets the stream from the disk or the network */
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret;
    int st_index[CODEC_TYPE_NB];
    int st_count[CODEC_TYPE_NB]={0};
    int st_best_packet_count[CODEC_TYPE_NB];
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;
    int eof=0;

    ic = avformat_alloc_context();

    memset(st_index, -1, sizeof(st_index));
    memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));

    ap->prealloced_context = 1;
    ap->width = frame_width;
    ap->height= frame_height;
    ap->time_base= (AVRational){1, 25};
    ap->pix_fmt = frame_pix_fmt;

    set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    err = av_find_stream_info(ic);
    if (err < 0) {
        fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
        ret = -1;
        goto fail;
    }
    if(ic->pb)
        ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end

    if(seek_by_bytes<0)
        seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);

    /* if a seek was requested on the command line, execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVStream *st= ic->streams[i];
        AVCodecContext *avctx = st->codec;
        ic->streams[i]->discard = AVDISCARD_ALL;
        if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
            exit(1);
        if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
            continue;

        if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
            continue;
        st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;

        switch(avctx->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (!audio_disable)
                st_index[CODEC_TYPE_AUDIO] = i;
            break;
        case CODEC_TYPE_VIDEO:
        case CODEC_TYPE_SUBTITLE:
            if (!video_disable)
                st_index[avctx->codec_type] = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
    }

    /* open the streams */
    if (st_index[CODEC_TYPE_AUDIO] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
    }

    ret=-1;
    if (st_index[CODEC_TYPE_VIDEO] >= 0) {
        ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
    }
    if(ret<0) {
        /* add the refresh timer to draw the picture */
        schedule_refresh(is, 40);

        if (!display_disable)
            is->show_audio = 2;
    }

    if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
        stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                is->read_pause_return= av_read_pause(ic);
            else
                av_read_play(ic);
        }
#if CONFIG_RTSP_DEMUXER
        if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            int64_t seek_target= is->seek_pos;
            int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
            int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
//FIXME the +-2 is due to rounding not being done in the correct direction in generation
//      of the seek_pos/seek_rel variables

            ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
            eof= 0;
        }

        /* if the queues are full, no need to read more */
        if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
            || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
                && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
                && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        if(url_feof(ic->pb) || eof) {
            if(is->video_stream >= 0){
                av_init_packet(pkt);
                pkt->data=NULL;
                pkt->size=0;
                pkt->stream_index= is->video_stream;
                packet_queue_put(&is->videoq, pkt);
            }
            SDL_Delay(10);
            if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
                ret=AVERROR_EOF;
                goto fail;
            }
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (ret == AVERROR_EOF)
                eof=1;
            if (url_ferror(ic->pb))
                break;
            SDL_Delay(100); /* wait for user event */
            continue;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

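/* Allocate the per-file state and start the demuxing thread; NULL is
   returned only if the allocation or thread creation fails. */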
static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    av_strlcpy(is->filename, filename, sizeof(is->filename));
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
    if (is->img_convert_ctx)
        sws_freeContext(is->img_convert_ctx);
    av_free(is);
}

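/* Switch to the next stream of the given type (bound to the 'a', 'v' and
   't' keys); for subtitles an index of -1 is allowed, which simply turns
   them off. */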
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
//        SDL_WM_ToggleFullScreen(screen);
    }
    video_open(cur_stream);
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        /* if the stream is paused, unpause it, then step */
        if (cur_stream->paused)
            stream_pause(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    int i;
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    for (i = 0; i < CODEC_TYPE_NB; i++)
        av_free(avcodec_opts[i]);
    av_free(avformat_opts);
    av_free(sws_opts);
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
        cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
        fill_rectangle(screen,
                    cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
                    bgcolor);
        SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
    }
}

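/* The main thread stays in this loop: keyboard and mouse events drive
   seeking, pausing and stream switching, while FF_ALLOC_EVENT and
   FF_REFRESH_EVENT are posted from other threads so that overlay
   allocation and display refresh happen on this thread. */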
/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        double x;
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
                    if (seek_by_bytes) {
                        if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
                            pos= cur_stream->video_current_pos;
                        }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
                            pos= cur_stream->audio_pkt.pos;
                        }else
                            pos = url_ftell(cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 8.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr, 1);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
        case SDL_MOUSEMOTION:
            if(event.type ==SDL_MOUSEBUTTONDOWN){
                x= event.button.x;
            }else{
                if(event.motion.state != SDL_PRESSED)
                    break;
                x= event.motion.x;
            }
            if (cur_stream) {
                if(seek_by_bytes || cur_stream->ic->duration<=0){
                    uint64_t size=  url_fsize(cur_stream->ic->pb);
                    stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
                }else{
                    int64_t ts;
                    int ns, hh, mm, ss;
                    int tns, thh, tmm, tss;
                    tns = cur_stream->ic->duration/1000000LL;
                    thh = tns/3600;
                    tmm = (tns%3600)/60;
                    tss = (tns%60);
                    frac = x/cur_stream->width;
                    ns = frac*tns;
                    hh = ns/3600;
                    mm = (ns%3600)/60;
                    ss = (ns%60);
                    fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                            hh, mm, ss, thh, tmm, tss);
                    ts = frac*cur_stream->ic->duration;
                    if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
                        ts += cur_stream->ic->start_time;
                    stream_seek(cur_stream, ts, 0, 0);
                }
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                screen_width = cur_stream->width = event.resize.w;
                screen_height= cur_stream->height= event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

static void opt_frame_size(const char *arg)
{
    if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
        fprintf(stderr, "Incorrect frame size\n");
        exit(1);
    }
    if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
        fprintf(stderr, "Frame size must be a multiple of 2\n");
        exit(1);
    }
}

static int opt_width(const char *opt, const char *arg)
{
    screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static int opt_height(const char *opt, const char *arg)
{
    screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
    return 0;
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

static void opt_frame_pix_fmt(const char *arg)
{
    frame_pix_fmt = av_get_pix_fmt(arg);
}

static int opt_sync(const char *opt, const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else {
        fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
        exit(1);
    }
    return 0;
}

static int opt_seek(const char *opt, const char *arg)
{
    start_time = parse_time_or_die(opt, arg, 1);
    return 0;
}

static int opt_debug(const char *opt, const char *arg)
{
    av_log_set_level(99);
    debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
    return 0;
}

static int opt_vismv(const char *opt, const char *arg)
{
    debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
    return 0;
}

static int opt_thread_count(const char *opt, const char *arg)
{
    thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
#if !HAVE_THREADS
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
    return 0;
}

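/* Command line option table: OPT_FUNC2 and plain HAS_ARG entries dispatch
   to the opt_* handlers above, while OPT_BOOL/OPT_INT entries store the
   parsed value directly in the global they point to. */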
static const OptionDef options[] = {
#include "cmdutils_common_opts.h"
    { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
    { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
    { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
    { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
    { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
    { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
    { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};

static void show_usage(void)
{
    printf("Simple media player\n");
    printf("usage: ffplay [options] input_file\n");
    printf("\n");
}

static void show_help(void)
{
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
}

static void opt_input_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
int main(int argc, char **argv)
{
    int flags, i;

    /* register all codecs, demuxers and protocols */
    avcodec_register_all();
    avdevice_register_all();
    av_register_all();

    for(i=0; i<CODEC_TYPE_NB; i++){
        avcodec_opts[i]= avcodec_alloc_context2(i);
    }
    avformat_opts = avformat_alloc_context();
    sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);

    show_banner();

    parse_options(argc, argv, options, opt_input_file);

    if (!input_filename) {
        show_usage();
        fprintf(stderr, "An input file must be specified\n");
        fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
        exit(1);
    }

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(__APPLE__)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#if HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}