/*
 * FFplay : Simple Media Player based on the ffmpeg libraries
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#define HAVE_AV_CONFIG_H
#include "avformat.h"
#include "swscale.h"

#include "version.h"
#include "cmdutils.h"

#include <SDL.h>
#include <SDL_thread.h>

#ifdef __MINGW32__
#undef main /* We don't want SDL to override our main() */
#endif

#ifdef CONFIG_OS2
#define INCL_DOS
 #include <os2.h>
 #include <stdio.h>

 void MorphToPM()
 {
   PPIB pib;
   PTIB tib;

   DosGetInfoBlocks(&tib, &pib);

   // Change flag from VIO to PM:
   if (pib->pib_ultype==2) pib->pib_ultype = 3;
 }
#endif

//#define DEBUG_SYNC

#define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
#define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
#define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)

/* SDL audio buffer size, in samples. Should be small to have precise
   A/V sync as SDL does not have hardware buffer fullness info. */
#define SDL_AUDIO_BUFFER_SIZE 1024

/* no A/V sync correction is done if the error is below this threshold */
#define AV_SYNC_THRESHOLD 0.01
/* no A/V sync correction is done if the error is too big */
#define AV_NOSYNC_THRESHOLD 10.0

/* maximum audio speed change to get correct sync */
#define SAMPLE_CORRECTION_PERCENT_MAX 10

/* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
#define AUDIO_DIFF_AVG_NB   20

/* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
#define SAMPLE_ARRAY_SIZE (2*65536)

static int sws_flags = SWS_BICUBIC;

typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    int abort_request;
    SDL_mutex *mutex;
    SDL_cond *cond;
} PacketQueue;

#define VIDEO_PICTURE_QUEUE_SIZE 1
#define SUBPICTURE_QUEUE_SIZE 4

typedef struct VideoPicture {
    double pts;                                  ///<presentation time stamp for this picture
    SDL_Overlay *bmp;
    int width, height; /* source width & height */
    int allocated;
} VideoPicture;

typedef struct SubPicture {
    double pts; /* presentation time stamp for this picture */
    AVSubtitle sub;
} SubPicture;

enum {
    AV_SYNC_AUDIO_MASTER, /* default choice */
    AV_SYNC_VIDEO_MASTER,
    AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
};

typedef struct VideoState {
    SDL_Thread *parse_tid;
    SDL_Thread *video_tid;
    AVInputFormat *iformat;
    int no_background;
    int abort_request;
    int paused;
    int last_paused;
    int seek_req;
    int seek_flags;
    int64_t seek_pos;
    AVFormatContext *ic;
    int dtg_active_format;

    int audio_stream;

    int av_sync_type;
    double external_clock; /* external clock base */
    int64_t external_clock_time;

    double audio_clock;
    double audio_diff_cum; /* used for AV difference average computation */
    double audio_diff_avg_coef;
    double audio_diff_threshold;
    int audio_diff_avg_count;
    AVStream *audio_st;
    PacketQueue audioq;
    int audio_hw_buf_size;
    /* samples output by the codec. we reserve more space for avsync
       compensation */
    DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
    unsigned int audio_buf_size; /* in bytes */
    int audio_buf_index; /* in bytes */
    AVPacket audio_pkt;
    uint8_t *audio_pkt_data;
    int audio_pkt_size;

    int show_audio; /* if true, display audio samples */
    int16_t sample_array[SAMPLE_ARRAY_SIZE];
    int sample_array_index;
    int last_i_start;

    SDL_Thread *subtitle_tid;
    int subtitle_stream;
    int subtitle_stream_changed;
    AVStream *subtitle_st;
    PacketQueue subtitleq;
    SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
    int subpq_size, subpq_rindex, subpq_windex;
    SDL_mutex *subpq_mutex;
    SDL_cond *subpq_cond;

    double frame_timer;
    double frame_last_pts;
    double frame_last_delay;
    double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
    int video_stream;
    AVStream *video_st;
    PacketQueue videoq;
    double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
    int64_t video_current_pts_time;              ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
    VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
    int pictq_size, pictq_rindex, pictq_windex;
    SDL_mutex *pictq_mutex;
    SDL_cond *pictq_cond;

    //    QETimer *video_timer;
    char filename[1024];
    int width, height, xleft, ytop;
} VideoState;

void show_help(void);
static int audio_write_get_buf_size(VideoState *is);

/* options specified by the user */
static AVInputFormat *file_iformat;
static const char *input_filename;
static int fs_screen_width;
static int fs_screen_height;
static int screen_width = 0;
static int screen_height = 0;
static int audio_disable;
static int video_disable;
static int seek_by_bytes;
static int display_disable;
static int show_status;
static int av_sync_type = AV_SYNC_AUDIO_MASTER;
static int64_t start_time = AV_NOPTS_VALUE;
static int debug = 0;
static int debug_mv = 0;
static int step = 0;
static int thread_count = 1;
static int workaround_bugs = 1;
static int fast = 0;
static int genpts = 0;
static int lowres = 0;
static int idct = FF_IDCT_AUTO;
static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
static int error_resilience = FF_ER_CAREFUL;
static int error_concealment = 3;

/* current context */
static int is_full_screen;
static VideoState *cur_stream;
static int64_t audio_callback_time;

AVPacket flush_pkt;

#define FF_ALLOC_EVENT   (SDL_USEREVENT)
#define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
#define FF_QUIT_EVENT    (SDL_USEREVENT + 2)

SDL_Surface *screen;

/* packet queue handling: the demuxer thread feeds one queue per decoder
   (audio, video, subtitles) and the decoder threads block in
   packet_queue_get(). flush_pkt is a sentinel packet that makes a decoder
   call avcodec_flush_buffers() after a seek. */
static void packet_queue_init(PacketQueue *q)
{
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = SDL_CreateMutex();
    q->cond = SDL_CreateCond();
}

static void packet_queue_flush(PacketQueue *q)
{
    AVPacketList *pkt, *pkt1;

    SDL_LockMutex(q->mutex);
    for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
        pkt1 = pkt->next;
        av_free_packet(&pkt->pkt);
        av_freep(&pkt);
    }
    q->last_pkt = NULL;
    q->first_pkt = NULL;
    q->nb_packets = 0;
    q->size = 0;
    SDL_UnlockMutex(q->mutex);
}

static void packet_queue_end(PacketQueue *q)
{
    packet_queue_flush(q);
    SDL_DestroyMutex(q->mutex);
    SDL_DestroyCond(q->cond);
}

static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
{
    AVPacketList *pkt1;

    /* duplicate the packet */
    if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
        return -1;

    pkt1 = av_malloc(sizeof(AVPacketList));
    if (!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;

    SDL_LockMutex(q->mutex);

    if (!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    /* XXX: should duplicate packet data in DV case */
    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
    return 0;
}

static void packet_queue_abort(PacketQueue *q)
{
    SDL_LockMutex(q->mutex);

    q->abort_request = 1;

    SDL_CondSignal(q->cond);

    SDL_UnlockMutex(q->mutex);
}

/* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
    AVPacketList *pkt1;
    int ret;

    SDL_LockMutex(q->mutex);

    for(;;) {
        if (q->abort_request) {
            ret = -1;
            break;
        }

        pkt1 = q->first_pkt;
        if (pkt1) {
            q->first_pkt = pkt1->next;
            if (!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if (!block) {
            ret = 0;
            break;
        } else {
            SDL_CondWait(q->cond, q->mutex);
        }
    }
    SDL_UnlockMutex(q->mutex);
    return ret;
}

static inline void fill_rectangle(SDL_Surface *screen,
                                  int x, int y, int w, int h, int color)
{
    SDL_Rect rect;
    rect.x = x;
    rect.y = y;
    rect.w = w;
    rect.h = h;
    SDL_FillRect(screen, &rect, color);
}

#if 0
/* draw only the border of a rectangle */
void fill_border(VideoState *s, int x, int y, int w, int h, int color)
{
    int w1, w2, h1, h2;

    /* fill the background */
    w1 = x;
    if (w1 < 0)
        w1 = 0;
    w2 = s->width - (x + w);
    if (w2 < 0)
        w2 = 0;
    h1 = y;
    if (h1 < 0)
        h1 = 0;
    h2 = s->height - (y + h);
    if (h2 < 0)
        h2 = 0;
    fill_rectangle(screen,
                   s->xleft, s->ytop,
                   w1, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + s->width - w2, s->ytop,
                   w2, s->height,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop,
                   s->width - w1 - w2, h1,
                   color);
    fill_rectangle(screen,
                   s->xleft + w1, s->ytop + s->height - h2,
                   s->width - w1 - w2, h2,
                   color);
}
#endif

#define SCALEBITS 10
#define ONE_HALF  (1 << (SCALEBITS - 1))
#define FIX(x)    ((int) ((x) * (1<<SCALEBITS) + 0.5))

#define RGB_TO_Y_CCIR(r, g, b) \
((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
  FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)

#define RGB_TO_U_CCIR(r1, g1, b1, shift)\
(((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 +         \
     FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define RGB_TO_V_CCIR(r1, g1, b1, shift)\
(((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 -           \
   FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)

#define ALPHA_BLEND(a, oldp, newp, s)\
((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))

#define RGBA_IN(r, g, b, a, s)\
{\
    unsigned int v = ((const uint32_t *)(s))[0];\
    a = (v >> 24) & 0xff;\
    r = (v >> 16) & 0xff;\
    g = (v >> 8) & 0xff;\
    b = v & 0xff;\
}

#define YUVA_IN(y, u, v, a, s, pal)\
{\
    unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)s];\
    a = (val >> 24) & 0xff;\
    y = (val >> 16) & 0xff;\
    u = (val >> 8) & 0xff;\
    v = val & 0xff;\
}

#define YUVA_OUT(d, y, u, v, a)\
{\
    ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
}

#define BPP 1

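/* Note on the macros above and blend_subrect() below: FIX() converts a
   floating-point coefficient to SCALEBITS (10-bit) fixed point, so the
   RGB_TO_*_CCIR() macros compute the CCIR-601 limited-range conversion
   (Y in 16..235, U/V around 128) using only integer arithmetic, with
   ONE_HALF providing rounding. blend_subrect() then alpha-blends a
   palettized subtitle rectangle, whose palette has already been converted
   to YUVA by subtitle_thread(), onto a 4:2:0 picture: luma is blended per
   pixel, while the chroma (u1/v1) and alpha (a1) of each 2x2 block are
   accumulated and blended once into the shared cb/cr sample; odd leading
   rows/columns are handled as special cases. */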
static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect)
{
    int wrap, wrap3, width2, skip2;
    int y, u, v, a, u1, v1, a1, w, h;
    uint8_t *lum, *cb, *cr;
    const uint8_t *p;
    const uint32_t *pal;

    lum = dst->data[0] + rect->y * dst->linesize[0];
    cb = dst->data[1] + (rect->y >> 1) * dst->linesize[1];
    cr = dst->data[2] + (rect->y >> 1) * dst->linesize[2];

    width2 = (rect->w + 1) >> 1;
    skip2 = rect->x >> 1;
    wrap = dst->linesize[0];
    wrap3 = rect->linesize;
    p = rect->bitmap;
    pal = rect->rgba_palette;  /* Now in YCrCb! */

    if (rect->y & 1) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    for(h = rect->h - (rect->y & 1); h >= 2; h -= 2) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            p += wrap3;
            lum += wrap;

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);

            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);

            cb++;
            cr++;
            p += -wrap3 + 2 * BPP;
            lum += -wrap + 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            p += wrap3;
            lum += wrap;
            YUVA_IN(y, u, v, a, p, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
            cb++;
            cr++;
            p += -wrap3 + BPP;
            lum += -wrap + 1;
        }
        p += wrap3 + (wrap3 - rect->w * BPP);
        lum += wrap + (wrap - rect->w - rect->x);
        cb += dst->linesize[1] - width2 - skip2;
        cr += dst->linesize[2] - width2 - skip2;
    }
    /* handle odd height */
    if (h) {
        lum += rect->x;
        cb += skip2;
        cr += skip2;

        if (rect->x & 1) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
            cb++;
            cr++;
            lum++;
            p += BPP;
        }
        for(w = rect->w - (rect->x & 1); w >= 2; w -= 2) {
            YUVA_IN(y, u, v, a, p, pal);
            u1 = u;
            v1 = v;
            a1 = a;
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);

            YUVA_IN(y, u, v, a, p + BPP, pal);
            u1 += u;
            v1 += v;
            a1 += a;
            lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
            cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
            cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
            cb++;
            cr++;
            p += 2 * BPP;
            lum += 2;
        }
        if (w) {
            YUVA_IN(y, u, v, a, p, pal);
            lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
            cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
            cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
        }
    }
}

static void free_subpicture(SubPicture *sp)
{
    int i;

    for (i = 0; i < sp->sub.num_rects; i++)
    {
        av_free(sp->sub.rects[i].bitmap);
        av_free(sp->sub.rects[i].rgba_palette);
    }

    av_free(sp->sub.rects);

    memset(&sp->sub, 0, sizeof(AVSubtitle));
}

static void video_image_display(VideoState *is)
{
    VideoPicture *vp;
    SubPicture *sp;
    AVPicture pict;
    float aspect_ratio;
    int width, height, x, y;
    SDL_Rect rect;
    int i;

    vp = &is->pictq[is->pictq_rindex];
    if (vp->bmp) {
        /* XXX: use variable in the frame */
        if (is->video_st->codec->sample_aspect_ratio.num == 0)
            aspect_ratio = 0;
        else
            aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
                * is->video_st->codec->width / is->video_st->codec->height;
        if (aspect_ratio <= 0.0)
            aspect_ratio = (float)is->video_st->codec->width /
                (float)is->video_st->codec->height;
        /* if an active format is indicated, then it overrides the
           mpeg format */
#if 0
        if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
            is->dtg_active_format = is->video_st->codec->dtg_active_format;
            printf("dtg_active_format=%d\n", is->dtg_active_format);
        }
#endif
#if 0
        switch(is->video_st->codec->dtg_active_format) {
        case FF_DTG_AFD_SAME:
        default:
            /* nothing to do */
            break;
        case FF_DTG_AFD_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        case FF_DTG_AFD_16_9:
            aspect_ratio = 16.0 / 9.0;
            break;
        case FF_DTG_AFD_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_4_3_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_16_9_SP_14_9:
            aspect_ratio = 14.0 / 9.0;
            break;
        case FF_DTG_AFD_SP_4_3:
            aspect_ratio = 4.0 / 3.0;
            break;
        }
#endif

        if (is->subtitle_st)
        {
            if (is->subpq_size > 0)
            {
                sp = &is->subpq[is->subpq_rindex];

                if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
                {
                    SDL_LockYUVOverlay (vp->bmp);

                    pict.data[0] = vp->bmp->pixels[0];
                    pict.data[1] = vp->bmp->pixels[2];
                    pict.data[2] = vp->bmp->pixels[1];

                    pict.linesize[0] = vp->bmp->pitches[0];
                    pict.linesize[1] = vp->bmp->pitches[2];
                    pict.linesize[2] = vp->bmp->pitches[1];

                    for (i = 0; i < sp->sub.num_rects; i++)
                        blend_subrect(&pict, &sp->sub.rects[i]);

                    SDL_UnlockYUVOverlay (vp->bmp);
                }
            }
        }

        /* XXX: we suppose the screen has a 1.0 pixel ratio */
        height = is->height;
        width = ((int)rint(height * aspect_ratio)) & -3;
        if (width > is->width) {
            width = is->width;
            height = ((int)rint(width / aspect_ratio)) & -3;
        }
        x = (is->width - width) / 2;
        y = (is->height - height) / 2;
        if (!is->no_background) {
            /* fill the background */
            //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
        } else {
            is->no_background = 0;
        }
        rect.x = is->xleft + x;
        rect.y = is->ytop + y;
        rect.w = width;
        rect.h = height;
        SDL_DisplayYUVOverlay(vp->bmp, &rect);
    } else {
#if 0
        fill_rectangle(screen,
                       is->xleft, is->ytop, is->width, is->height,
                       QERGB(0x00, 0x00, 0x00));
#endif
    }
}

static inline int compute_mod(int a, int b)
{
    a = a % b;
    if (a >= 0)
        return a;
    else
        return a + b;
}

static void video_audio_display(VideoState *s)
{
    int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
    int ch, channels, h, h2, bgcolor, fgcolor;
    int64_t time_diff;

    /* compute display index : center on currently output samples */
    channels = s->audio_st->codec->channels;
    nb_display_channels = channels;
    if (!s->paused) {
        n = 2 * channels;
        delay = audio_write_get_buf_size(s);
        delay /= n;

        /* to be more precise, we take into account the time spent since
           the last buffer computation */
        if (audio_callback_time) {
            time_diff = av_gettime() - audio_callback_time;
            delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
        }

        delay -= s->width / 2;
        if (delay < s->width)
            delay = s->width;
        i_start = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
        s->last_i_start = i_start;
    } else {
        i_start = s->last_i_start;
    }

    bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
    fill_rectangle(screen,
                   s->xleft, s->ytop, s->width, s->height,
                   bgcolor);

    fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);

    /* total height for one channel */
    h = s->height / nb_display_channels;
    /* graph height / 2 */
    h2 = (h * 9) / 20;
    for(ch = 0;ch < nb_display_channels; ch++) {
        i = i_start + ch;
        y1 = s->ytop + ch * h + (h / 2); /* position of center line */
        for(x = 0; x < s->width; x++) {
            y = (s->sample_array[i] * h2) >> 15;
            if (y < 0) {
                y = -y;
                ys = y1 - y;
            } else {
                ys = y1;
            }
            fill_rectangle(screen,
                           s->xleft + x, ys, 1, y,
                           fgcolor);
            i += channels;
            if (i >= SAMPLE_ARRAY_SIZE)
                i -= SAMPLE_ARRAY_SIZE;
        }
    }

    fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);

    for(ch = 1;ch < nb_display_channels; ch++) {
        y = s->ytop + ch * h;
        fill_rectangle(screen,
                       s->xleft, y, s->width, 1,
                       fgcolor);
    }
    SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
}

static int video_open(VideoState *is);

/* display the current picture, if any */
static void video_display(VideoState *is)
{
    if(!screen)
        video_open(cur_stream);
    if (is->audio_st && is->show_audio)
        video_audio_display(is);
    else if (is->video_st)
        video_image_display(is);
}

static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
    SDL_Event event;
    event.type = FF_REFRESH_EVENT;
    event.user.data1 = opaque;
    SDL_PushEvent(&event);
    return 0; /* 0 means stop timer */
}

/* schedule a video refresh in 'delay' ms */
static void schedule_refresh(VideoState *is, int delay)
{
    SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}

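/* Note on the clock functions below: each returns a stream position in
   seconds. The audio clock is the pts reached by the decoder minus the
   playout time of the data still queued towards the sound card (the code
   assumes 16-bit samples, hence bytes_per_sec = sample_rate * 2 * channels,
   e.g. 176400 bytes/s for 44.1 kHz stereo). The video clock extrapolates
   from the pts of the last displayed frame using the wall-clock time
   elapsed since it was shown. */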
/* get the current audio clock value */
static double get_audio_clock(VideoState *is)
{
    double pts;
    int hw_buf_size, bytes_per_sec;
    pts = is->audio_clock;
    hw_buf_size = audio_write_get_buf_size(is);
    bytes_per_sec = 0;
    if (is->audio_st) {
        bytes_per_sec = is->audio_st->codec->sample_rate *
            2 * is->audio_st->codec->channels;
    }
    if (bytes_per_sec)
        pts -= (double)hw_buf_size / bytes_per_sec;
    return pts;
}

/* get the current video clock value */
static double get_video_clock(VideoState *is)
{
    double delta;
    if (is->paused) {
        delta = 0;
    } else {
        delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
    }
    return is->video_current_pts + delta;
}

/* get the current external clock value */
static double get_external_clock(VideoState *is)
{
    int64_t ti;
    ti = av_gettime();
    return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
}

/* get the current master clock value */
static double get_master_clock(VideoState *is)
{
    double val;

    if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
        if (is->video_st)
            val = get_video_clock(is);
        else
            val = get_audio_clock(is);
    } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
        if (is->audio_st)
            val = get_audio_clock(is);
        else
            val = get_video_clock(is);
    } else {
        val = get_external_clock(is);
    }
    return val;
}

/* seek in the stream */
static void stream_seek(VideoState *is, int64_t pos, int rel)
{
    if (!is->seek_req) {
        is->seek_pos = pos;
        is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
        if (seek_by_bytes)
            is->seek_flags |= AVSEEK_FLAG_BYTE;
        is->seek_req = 1;
    }
}

/* pause or resume the video */
static void stream_pause(VideoState *is)
{
    is->paused = !is->paused;
    if (is->paused) {
        is->video_current_pts = get_video_clock(is);
    }
}

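/* Note on the refresh logic below: frame_timer accumulates the target
   display time of each frame. When video is not the master clock, the
   nominal inter-frame delay is adjusted against the master: if the frame
   is late by more than sync_threshold it is shown immediately (delay = 0),
   and if it is early by more than sync_threshold the delay is doubled,
   which effectively repeats the previous frame for one more period. */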
/* called to display each frame */
static void video_refresh_timer(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;
    double actual_delay, delay, sync_threshold, ref_clock, diff;

    SubPicture *sp, *sp2;

    if (is->video_st) {
        if (is->pictq_size == 0) {
            /* if no picture, need to wait */
            schedule_refresh(is, 1);
        } else {
            /* dequeue the picture */
            vp = &is->pictq[is->pictq_rindex];

            /* update current video pts */
            is->video_current_pts = vp->pts;
            is->video_current_pts_time = av_gettime();

            /* compute nominal delay */
            delay = vp->pts - is->frame_last_pts;
            if (delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            is->frame_last_delay = delay;
            is->frame_last_pts = vp->pts;

            /* update delay to follow master synchronisation source */
            if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
                 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
                /* if video is slave, we try to correct big delays by
                   duplicating or deleting a frame */
                ref_clock = get_master_clock(is);
                diff = vp->pts - ref_clock;

                /* skip or repeat frame. We take into account the
                   delay to compute the threshold. I still don't know
                   if it is the best guess */
                sync_threshold = AV_SYNC_THRESHOLD;
                if (delay > sync_threshold)
                    sync_threshold = delay;
                if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
                    if (diff <= -sync_threshold)
                        delay = 0;
                    else if (diff >= sync_threshold)
                        delay = 2 * delay;
                }
            }

            is->frame_timer += delay;
            /* compute the REAL delay (we need to do that to avoid
               long term errors) */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if (actual_delay < 0.010) {
                /* XXX: should skip picture */
                actual_delay = 0.010;
            }
            /* launch timer for next picture */
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));

#if defined(DEBUG_SYNC)
            printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
                   delay, actual_delay, vp->pts, -diff);
#endif

            if(is->subtitle_st) {
                if (is->subtitle_stream_changed) {
                    SDL_LockMutex(is->subpq_mutex);

                    while (is->subpq_size) {
                        free_subpicture(&is->subpq[is->subpq_rindex]);

                        /* update queue size and signal for next picture */
                        if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                            is->subpq_rindex = 0;

                        is->subpq_size--;
                    }
                    is->subtitle_stream_changed = 0;

                    SDL_CondSignal(is->subpq_cond);
                    SDL_UnlockMutex(is->subpq_mutex);
                } else {
                    if (is->subpq_size > 0) {
                        sp = &is->subpq[is->subpq_rindex];

                        if (is->subpq_size > 1)
                            sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
                        else
                            sp2 = NULL;

                        if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
                                || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
                        {
                            free_subpicture(sp);

                            /* update queue size and signal for next picture */
                            if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
                                is->subpq_rindex = 0;

                            SDL_LockMutex(is->subpq_mutex);
                            is->subpq_size--;
                            SDL_CondSignal(is->subpq_cond);
                            SDL_UnlockMutex(is->subpq_mutex);
                        }
                    }
                }
            }

            /* display picture */
            video_display(is);

            /* update queue size and signal for next picture */
            if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
                is->pictq_rindex = 0;

            SDL_LockMutex(is->pictq_mutex);
            is->pictq_size--;
            SDL_CondSignal(is->pictq_cond);
            SDL_UnlockMutex(is->pictq_mutex);
        }
    } else if (is->audio_st) {
        /* draw the next audio frame */

        schedule_refresh(is, 40);

        /* if only audio stream, then display the audio bars (better
           than nothing, just to test the implementation) */

        /* display picture */
        video_display(is);
    } else {
        schedule_refresh(is, 100);
    }
    if (show_status) {
        static int64_t last_time;
        int64_t cur_time;
        int aqsize, vqsize, sqsize;
        double av_diff;

        cur_time = av_gettime();
        if (!last_time || (cur_time - last_time) >= 500 * 1000) {
            aqsize = 0;
            vqsize = 0;
            sqsize = 0;
            if (is->audio_st)
                aqsize = is->audioq.size;
            if (is->video_st)
                vqsize = is->videoq.size;
            if (is->subtitle_st)
                sqsize = is->subtitleq.size;
            av_diff = 0;
            if (is->audio_st && is->video_st)
                av_diff = get_audio_clock(is) - get_video_clock(is);
            printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB    \r",
                   get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
            fflush(stdout);
            last_time = cur_time;
        }
    }
}

/* allocate a picture (needs to be done in the main thread to avoid
   potential locking problems) */
static void alloc_picture(void *opaque)
{
    VideoState *is = opaque;
    VideoPicture *vp;

    vp = &is->pictq[is->pictq_windex];

    if (vp->bmp)
        SDL_FreeYUVOverlay(vp->bmp);

#if 0
    /* XXX: use generic function */
    /* XXX: disable overlay if no hardware acceleration or if RGB format */
    switch(is->video_st->codec->pix_fmt) {
    case PIX_FMT_YUV420P:
    case PIX_FMT_YUV422P:
    case PIX_FMT_YUV444P:
    case PIX_FMT_YUV422:
    case PIX_FMT_YUV410P:
    case PIX_FMT_YUV411P:
        is_yuv = 1;
        break;
    default:
        is_yuv = 0;
        break;
    }
#endif
    vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
                                   is->video_st->codec->height,
                                   SDL_YV12_OVERLAY,
                                   screen);
    vp->width = is->video_st->codec->width;
    vp->height = is->video_st->codec->height;

    SDL_LockMutex(is->pictq_mutex);
    vp->allocated = 1;
    SDL_CondSignal(is->pictq_cond);
    SDL_UnlockMutex(is->pictq_mutex);
}

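/* Note on queue_picture() below: pictq is a ring buffer of
   VIDEO_PICTURE_QUEUE_SIZE entries (currently 1) indexed by
   pictq_windex/pictq_rindex and guarded by pictq_mutex/pictq_cond.
   The video decoder thread blocks here until the refresh timer has
   consumed the previous picture, and the SDL overlay itself is
   (re)allocated in the main thread via the FF_ALLOC_EVENT handshake. */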
/**
 *
 * @param pts the dts of the pkt / pts of the frame and guessed if not known
 */
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
{
    VideoPicture *vp;
    int dst_pix_fmt;
    AVPicture pict;
    static struct SwsContext *img_convert_ctx;

    /* wait until we have space to put a new picture */
    SDL_LockMutex(is->pictq_mutex);
    while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
           !is->videoq.abort_request) {
        SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    }
    SDL_UnlockMutex(is->pictq_mutex);

    if (is->videoq.abort_request)
        return -1;

    vp = &is->pictq[is->pictq_windex];

    /* alloc or resize hardware picture buffer */
    if (!vp->bmp ||
        vp->width != is->video_st->codec->width ||
        vp->height != is->video_st->codec->height) {
        SDL_Event event;

        vp->allocated = 0;

        /* the allocation must be done in the main thread to avoid
           locking problems */
        event.type = FF_ALLOC_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);

        /* wait until the picture is allocated */
        SDL_LockMutex(is->pictq_mutex);
        while (!vp->allocated && !is->videoq.abort_request) {
            SDL_CondWait(is->pictq_cond, is->pictq_mutex);
        }
        SDL_UnlockMutex(is->pictq_mutex);

        if (is->videoq.abort_request)
            return -1;
    }

    /* if the frame is not skipped, then display it */
    if (vp->bmp) {
        /* get a pointer to the bitmap */
        SDL_LockYUVOverlay (vp->bmp);

        dst_pix_fmt = PIX_FMT_YUV420P;
        pict.data[0] = vp->bmp->pixels[0];
        pict.data[1] = vp->bmp->pixels[2];
        pict.data[2] = vp->bmp->pixels[1];

        pict.linesize[0] = vp->bmp->pitches[0];
        pict.linesize[1] = vp->bmp->pitches[2];
        pict.linesize[2] = vp->bmp->pitches[1];
        if (img_convert_ctx == NULL) {
            img_convert_ctx = sws_getContext(is->video_st->codec->width,
                    is->video_st->codec->height, is->video_st->codec->pix_fmt,
                    is->video_st->codec->width, is->video_st->codec->height,
                    dst_pix_fmt, sws_flags, NULL, NULL, NULL);
            if (img_convert_ctx == NULL) {
                fprintf(stderr, "Cannot initialize the conversion context\n");
                exit(1);
            }
        }
        sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
                  0, is->video_st->codec->height, pict.data, pict.linesize);
        /* update the bitmap content */
        SDL_UnlockYUVOverlay(vp->bmp);

        vp->pts = pts;

        /* now we can update the picture count */
        if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
            is->pictq_windex = 0;
        SDL_LockMutex(is->pictq_mutex);
        is->pictq_size++;
        SDL_UnlockMutex(is->pictq_mutex);
    }
    return 0;
}

/**
 * compute the exact PTS for the picture if it is omitted in the stream
 * @param pts1 the dts of the pkt / pts of the frame
 */
static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
{
    double frame_delay, pts;

    pts = pts1;

    if (pts != 0) {
        /* update video clock with pts, if present */
        is->video_clock = pts;
    } else {
        pts = is->video_clock;
    }
    /* update video clock for next frame */
    frame_delay = av_q2d(is->video_st->codec->time_base);
    /* for MPEG2, the frame can be repeated, so we update the
       clock accordingly */
    frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    is->video_clock += frame_delay;

#if defined(DEBUG_SYNC) && 0
    {
        int ftype;
        if (src_frame->pict_type == FF_B_TYPE)
            ftype = 'B';
        else if (src_frame->pict_type == FF_I_TYPE)
            ftype = 'I';
        else
            ftype = 'P';
        printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
               ftype, pts, pts1);
    }
#endif
    return queue_picture(is, src_frame, pts);
}

static int video_thread(void *arg)
{
    VideoState *is = arg;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_picture;
    AVFrame *frame= avcodec_alloc_frame();
    double pts;

    for(;;) {
        while (is->paused && !is->videoq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->videoq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->dts != AV_NOPTS_VALUE)
            pts = av_q2d(is->video_st->time_base)*pkt->dts;

        len1 = avcodec_decode_video(is->video_st->codec,
                                    frame, &got_picture,
                                    pkt->data, pkt->size);
//        if (len1 < 0)
//            break;
        if (got_picture) {
            if (output_picture2(is, frame, pts) < 0)
                goto the_end;
        }
        av_free_packet(pkt);
        if (step)
            if (cur_stream)
                stream_pause(cur_stream);
    }
 the_end:
    av_free(frame);
    return 0;
}

static int subtitle_thread(void *arg)
{
    VideoState *is = arg;
    SubPicture *sp;
    AVPacket pkt1, *pkt = &pkt1;
    int len1, got_subtitle;
    double pts;
    int i, j;
    int r, g, b, y, u, v, a;

    for(;;) {
        while (is->paused && !is->subtitleq.abort_request) {
            SDL_Delay(10);
        }
        if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
            break;

        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->subtitle_st->codec);
            continue;
        }
        SDL_LockMutex(is->subpq_mutex);
        while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
               !is->subtitleq.abort_request) {
            SDL_CondWait(is->subpq_cond, is->subpq_mutex);
        }
        SDL_UnlockMutex(is->subpq_mutex);

        if (is->subtitleq.abort_request)
            goto the_end;

        sp = &is->subpq[is->subpq_windex];

        /* NOTE: ipts is the PTS of the _first_ picture beginning in
           this packet, if any */
        pts = 0;
        if (pkt->pts != AV_NOPTS_VALUE)
            pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;

        len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
                                    &sp->sub, &got_subtitle,
                                    pkt->data, pkt->size);
//        if (len1 < 0)
//            break;
        if (got_subtitle && sp->sub.format == 0) {
            sp->pts = pts;

            for (i = 0; i < sp->sub.num_rects; i++)
            {
                for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
                {
                    RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
                    y = RGB_TO_Y_CCIR(r, g, b);
                    u = RGB_TO_U_CCIR(r, g, b, 0);
                    v = RGB_TO_V_CCIR(r, g, b, 0);
                    YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
                }
            }

            /* now we can update the picture count */
            if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
                is->subpq_windex = 0;
            SDL_LockMutex(is->subpq_mutex);
            is->subpq_size++;
            SDL_UnlockMutex(is->subpq_mutex);
        }
        av_free_packet(pkt);
//        if (step)
//            if (cur_stream)
//                stream_pause(cur_stream);
    }
 the_end:
    return 0;
}

/* copy samples for viewing in editor window */
static void update_sample_display(VideoState *is, short *samples, int samples_size)
{
    int size, len, channels;

    channels = is->audio_st->codec->channels;

    size = samples_size / sizeof(short);
    while (size > 0) {
        len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
        if (len > size)
            len = size;
        memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
        samples += len;
        is->sample_array_index += len;
        if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
            is->sample_array_index = 0;
        size -= len;
    }
}

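/* Note on synchronize_audio() below: audio_diff_cum is a recursive (IIR)
   accumulator, cum = diff + coef * cum, with
   coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB) ~= 0.79, so
   avg_diff = cum * (1 - coef) behaves like an exponentially weighted mean
   in which the last AUDIO_DIFF_AVG_NB measurements carry 99% of the
   weight. The resulting sample correction is clamped to
   +/- SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer size. */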
/* return the new audio buffer size (samples can be added or deleted
   to get better sync if video or an external clock is the master) */
static int synchronize_audio(VideoState *is, short *samples,
                             int samples_size1, double pts)
{
    int n, samples_size;
    double ref_clock;

    n = 2 * is->audio_st->codec->channels;
    samples_size = samples_size1;

    /* if not master, then we try to remove or add samples to correct the clock */
    if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
         is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
        double diff, avg_diff;
        int wanted_size, min_size, max_size, nb_samples;

        ref_clock = get_master_clock(is);
        diff = get_audio_clock(is) - ref_clock;

        if (diff < AV_NOSYNC_THRESHOLD) {
            is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
            if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
                /* not enough measures to have a correct estimate */
                is->audio_diff_avg_count++;
            } else {
                /* estimate the A-V difference */
                avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);

                if (fabs(avg_diff) >= is->audio_diff_threshold) {
                    wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
                    nb_samples = samples_size / n;

                    min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
                    if (wanted_size < min_size)
                        wanted_size = min_size;
                    else if (wanted_size > max_size)
                        wanted_size = max_size;

                    /* add or remove samples to correct the sync */
                    if (wanted_size < samples_size) {
                        /* remove samples */
                        samples_size = wanted_size;
                    } else if (wanted_size > samples_size) {
                        uint8_t *samples_end, *q;
                        int nb;

                        /* add samples by duplicating the final sample */
                        nb = (wanted_size - samples_size);
                        samples_end = (uint8_t *)samples + samples_size - n;
                        q = samples_end + n;
                        while (nb > 0) {
                            memcpy(q, samples_end, n);
                            q += n;
                            nb -= n;
                        }
                        samples_size = wanted_size;
                    }
                }
#if 0
                printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
                       diff, avg_diff, samples_size - samples_size1,
                       is->audio_clock, is->video_clock, is->audio_diff_threshold);
#endif
            }
        } else {
            /* too big difference : may be initial PTS errors, so
               reset A-V filter */
            is->audio_diff_avg_count = 0;
            is->audio_diff_cum = 0;
        }
    }

    return samples_size;
}

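/* Note on audio_decode_frame() below: one queued packet may contain several
   audio frames, so the packet is consumed incrementally and each call
   returns exactly one decoded frame. The running audio clock is advanced by
   data_size / (2 * channels * sample_rate) seconds, which again assumes
   16-bit output samples. */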
/* decode one audio frame and return its uncompressed size */
static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, double *pts_ptr)
{
    AVPacket *pkt = &is->audio_pkt;
    int n, len1, data_size;
    double pts;

    for(;;) {
        /* NOTE: the audio packet can contain several frames */
        while (is->audio_pkt_size > 0) {
            len1 = avcodec_decode_audio(is->audio_st->codec,
                                        (int16_t *)audio_buf, &data_size,
                                        is->audio_pkt_data, is->audio_pkt_size);
            if (len1 < 0) {
                /* if error, we skip the frame */
                is->audio_pkt_size = 0;
                break;
            }

            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0)
                continue;
            /* if no pts, then compute it */
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double)data_size /
                (double)(n * is->audio_st->codec->sample_rate);
#if defined(DEBUG_SYNC)
            {
                static double last_clock;
                printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
                       is->audio_clock - last_clock,
                       is->audio_clock, pts);
                last_clock = is->audio_clock;
            }
#endif
            return data_size;
        }

        /* free the current packet */
        if (pkt->data)
            av_free_packet(pkt);

        if (is->paused || is->audioq.abort_request) {
            return -1;
        }

        /* read next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0)
            return -1;
        if(pkt->data == flush_pkt.data){
            avcodec_flush_buffers(is->audio_st->codec);
            continue;
        }

        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;

        /* update the audio clock with the packet pts, if available */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
        }
    }
}

/* get the current audio output buffer size, in bytes. With SDL, we
   cannot get precise information on the hardware buffer fullness */
static int audio_write_get_buf_size(VideoState *is)
{
    return is->audio_hw_buf_size - is->audio_buf_index;
}


/* prepare a new audio buffer */
void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
{
    VideoState *is = opaque;
    int audio_size, len1;
    double pts;

    audio_callback_time = av_gettime();

    while (len > 0) {
        if (is->audio_buf_index >= is->audio_buf_size) {
            audio_size = audio_decode_frame(is, is->audio_buf, &pts);
            if (audio_size < 0) {
                /* if error, just output silence */
                is->audio_buf_size = 1024;
                memset(is->audio_buf, 0, is->audio_buf_size);
            } else {
                if (is->show_audio)
                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
                                               pts);
                is->audio_buf_size = audio_size;
            }
            is->audio_buf_index = 0;
        }
        len1 = is->audio_buf_size - is->audio_buf_index;
        if (len1 > len)
            len1 = len;
        memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
        len -= len1;
        stream += len1;
        is->audio_buf_index += len1;
    }
}

static int video_open(VideoState *is){
    int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
    int w,h;

    if (is_full_screen && fs_screen_width) {
        w = fs_screen_width;
        h = fs_screen_height;
        flags |= SDL_FULLSCREEN;
    } else {
        if(screen_width){
            w = screen_width;
            h = screen_height;
        }else if (is->video_st && is->video_st->codec->width){
            w = is->video_st->codec->width;
            h = is->video_st->codec->height;
        } else {
            w = 640;
            h = 480;
        }
        flags |= SDL_RESIZABLE;
    }
#ifndef CONFIG_DARWIN
    screen = SDL_SetVideoMode(w, h, 0, flags);
#else
    /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
    screen = SDL_SetVideoMode(w, h, 24, flags);
#endif
    if (!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        return -1;
    }
    SDL_WM_SetCaption("FFplay", "FFplay");

    is->width = screen->w;
    is->height = screen->h;

    return 0;
}

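/* besides finding and opening the decoder, this copies the decoder tuning
   collected from the command line (lowres, skip_*, idct, error handling,
   thread count) into the codec context, opens the SDL audio device for
   audio streams, and starts the video/subtitle decoding threads */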
/* open a given stream. Return 0 if OK */
static int stream_component_open(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;
    AVCodec *codec;
    SDL_AudioSpec wanted_spec, spec;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return -1;
    enc = ic->streams[stream_index]->codec;

    /* prepare audio output */
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        wanted_spec.freq = enc->sample_rate;
        wanted_spec.format = AUDIO_S16SYS;
        /* hack for AC3. XXX: suppress that */
        if (enc->channels > 2)
            enc->channels = 2;
        wanted_spec.channels = enc->channels;
        wanted_spec.silence = 0;
        wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
        wanted_spec.callback = sdl_audio_callback;
        wanted_spec.userdata = is;
        if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
            fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
            return -1;
        }
        is->audio_hw_buf_size = spec.size;
    }

    codec = avcodec_find_decoder(enc->codec_id);
    enc->debug_mv = debug_mv;
    enc->debug = debug;
    enc->workaround_bugs = workaround_bugs;
    enc->lowres = lowres;
    if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
    enc->idct_algo= idct;
    if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
    enc->skip_frame= skip_frame;
    enc->skip_idct= skip_idct;
    enc->skip_loop_filter= skip_loop_filter;
    enc->error_resilience= error_resilience;
    enc->error_concealment= error_concealment;
    if (!codec ||
        avcodec_open(enc, codec) < 0)
        return -1;
#if defined(HAVE_THREADS)
    if(thread_count>1)
        avcodec_thread_init(enc, thread_count);
#endif
    enc->thread_count= thread_count;
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_stream = stream_index;
        is->audio_st = ic->streams[stream_index];
        is->audio_buf_size = 0;
        is->audio_buf_index = 0;

        /* init averaging filter */
        is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
        is->audio_diff_avg_count = 0;
        /* since we do not have a precise enough audio fifo fullness,
           we correct audio sync only if larger than this threshold */
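        /* i.e. roughly two SDL callback buffers' worth of audio */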
        is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;

        memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
        packet_queue_init(&is->audioq);
        SDL_PauseAudio(0);
        break;
    case CODEC_TYPE_VIDEO:
        is->video_stream = stream_index;
        is->video_st = ic->streams[stream_index];

        is->frame_last_delay = 40e-3;
        is->frame_timer = (double)av_gettime() / 1000000.0;
        is->video_current_pts_time = av_gettime();

        packet_queue_init(&is->videoq);
        is->video_tid = SDL_CreateThread(video_thread, is);
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_stream = stream_index;
        is->subtitle_st = ic->streams[stream_index];
        packet_queue_init(&is->subtitleq);

        is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
        break;
    default:
        break;
    }
    return 0;
}

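/* shut one stream component down: abort its packet queue (waking any
   blocked reader), signal and join the consumer thread, free the queue
   and finally close the codec */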
static void stream_component_close(VideoState *is, int stream_index)
{
    AVFormatContext *ic = is->ic;
    AVCodecContext *enc;

    if (stream_index < 0 || stream_index >= ic->nb_streams)
        return;
    enc = ic->streams[stream_index]->codec;

    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        packet_queue_abort(&is->audioq);

        SDL_CloseAudio();

        packet_queue_end(&is->audioq);
        break;
    case CODEC_TYPE_VIDEO:
        packet_queue_abort(&is->videoq);

        /* note: we also signal this mutex to make sure we deblock the
           video thread in all cases */
        SDL_LockMutex(is->pictq_mutex);
        SDL_CondSignal(is->pictq_cond);
        SDL_UnlockMutex(is->pictq_mutex);

        SDL_WaitThread(is->video_tid, NULL);

        packet_queue_end(&is->videoq);
        break;
    case CODEC_TYPE_SUBTITLE:
        packet_queue_abort(&is->subtitleq);

        /* note: we also signal this mutex to make sure we deblock the
           subtitle thread in all cases */
        SDL_LockMutex(is->subpq_mutex);
        is->subtitle_stream_changed = 1;

        SDL_CondSignal(is->subpq_cond);
        SDL_UnlockMutex(is->subpq_mutex);

        SDL_WaitThread(is->subtitle_tid, NULL);

        packet_queue_end(&is->subtitleq);
        break;
    default:
        break;
    }

    avcodec_close(enc);
    switch(enc->codec_type) {
    case CODEC_TYPE_AUDIO:
        is->audio_st = NULL;
        is->audio_stream = -1;
        break;
    case CODEC_TYPE_VIDEO:
        is->video_st = NULL;
        is->video_stream = -1;
        break;
    case CODEC_TYPE_SUBTITLE:
        is->subtitle_st = NULL;
        is->subtitle_stream = -1;
        break;
    default:
        break;
    }
}

static void dump_stream_info(const AVFormatContext *s)
{
    if (s->track != 0)
        fprintf(stderr, "Track: %d\n", s->track);
    if (s->title[0] != '\0')
        fprintf(stderr, "Title: %s\n", s->title);
    if (s->author[0] != '\0')
        fprintf(stderr, "Author: %s\n", s->author);
    if (s->copyright[0] != '\0')
        fprintf(stderr, "Copyright: %s\n", s->copyright);
    if (s->comment[0] != '\0')
        fprintf(stderr, "Comment: %s\n", s->comment);
    if (s->album[0] != '\0')
        fprintf(stderr, "Album: %s\n", s->album);
    if (s->year != 0)
        fprintf(stderr, "Year: %d\n", s->year);
    if (s->genre[0] != '\0')
        fprintf(stderr, "Genre: %s\n", s->genre);
}

/* since we have only one decoding thread, we can use a global
   variable instead of a thread local variable */
static VideoState *global_video_state;

static int decode_interrupt_cb(void)
{
    return (global_video_state && global_video_state->abort_request);
}

/* this thread gets the stream from the disk or the network */
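/* it demuxes packets with av_read_frame() and dispatches them to the
   audio/video/subtitle packet queues; reading is throttled while the
   queues are above their MAX_*Q_SIZE limits, and a seek request flushes
   the queues and pushes flush_pkt so the decoders reset their state */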
static int decode_thread(void *arg)
{
    VideoState *is = arg;
    AVFormatContext *ic;
    int err, i, ret, video_index, audio_index, use_play;
    AVPacket pkt1, *pkt = &pkt1;
    AVFormatParameters params, *ap = &params;

    video_index = -1;
    audio_index = -1;
    is->video_stream = -1;
    is->audio_stream = -1;
    is->subtitle_stream = -1;

    global_video_state = is;
    url_set_interrupt_cb(decode_interrupt_cb);

    memset(ap, 0, sizeof(*ap));
    ap->initial_pause = 1; /* we force a pause when starting an RTSP
                              stream */

    err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
    if (err < 0) {
        print_error(is->filename, err);
        ret = -1;
        goto fail;
    }
    is->ic = ic;
#ifdef CONFIG_NETWORK
    use_play = (ic->iformat == &rtsp_demuxer);
#else
    use_play = 0;
#endif

    if(genpts)
        ic->flags |= AVFMT_FLAG_GENPTS;

    if (!use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
        ic->pb.eof_reached= 0; //FIXME hack, ffplay maybe shouldn't use url_feof() to test for the end
    }

    /* if seeking requested, we execute it */
    if (start_time != AV_NOPTS_VALUE) {
        int64_t timestamp;

        timestamp = start_time;
        /* add the stream start time */
        if (ic->start_time != AV_NOPTS_VALUE)
            timestamp += ic->start_time;
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            fprintf(stderr, "%s: could not seek to position %0.3f\n",
                    is->filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* now we can begin to play (RTSP stream only) */
    av_read_play(ic);

    if (use_play) {
        err = av_find_stream_info(ic);
        if (err < 0) {
            fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
            ret = -1;
            goto fail;
        }
    }

    for(i = 0; i < ic->nb_streams; i++) {
        AVCodecContext *enc = ic->streams[i]->codec;
        switch(enc->codec_type) {
        case CODEC_TYPE_AUDIO:
            if (audio_index < 0 && !audio_disable)
                audio_index = i;
            break;
        case CODEC_TYPE_VIDEO:
            if (video_index < 0 && !video_disable)
                video_index = i;
            break;
        default:
            break;
        }
    }
    if (show_status) {
        dump_format(ic, 0, is->filename, 0);
        dump_stream_info(ic);
    }

    /* open the streams */
    if (audio_index >= 0) {
        stream_component_open(is, audio_index);
    }

    if (video_index >= 0) {
        stream_component_open(is, video_index);
    } else {
        if (!display_disable)
            is->show_audio = 1;
    }

    if (is->video_stream < 0 && is->audio_stream < 0) {
        fprintf(stderr, "%s: could not open codecs\n", is->filename);
        ret = -1;
        goto fail;
    }

    for(;;) {
        if (is->abort_request)
            break;
#ifdef CONFIG_NETWORK
        if (is->paused != is->last_paused) {
            is->last_paused = is->paused;
            if (is->paused)
                av_read_pause(ic);
            else
                av_read_play(ic);
        }
        if (is->paused && ic->iformat == &rtsp_demuxer) {
            /* wait 10 ms to avoid trying to get another packet */
            /* XXX: horrible */
            SDL_Delay(10);
            continue;
        }
#endif
        if (is->seek_req) {
            ret = av_seek_frame(is->ic, -1, is->seek_pos, is->seek_flags);
            if (ret < 0) {
                fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
            }else{
                if (is->audio_stream >= 0) {
                    packet_queue_flush(&is->audioq);
                    packet_queue_put(&is->audioq, &flush_pkt);
                }
                if (is->subtitle_stream >= 0) {
                    packet_queue_flush(&is->subtitleq);
                    packet_queue_put(&is->subtitleq, &flush_pkt);
                }
                if (is->video_stream >= 0) {
                    packet_queue_flush(&is->videoq);
                    packet_queue_put(&is->videoq, &flush_pkt);
                }
            }
            is->seek_req = 0;
        }

        /* if the queues are full, no need to read more */
        if (is->audioq.size > MAX_AUDIOQ_SIZE ||
            is->videoq.size > MAX_VIDEOQ_SIZE ||
            is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
            url_feof(&ic->pb)) {
            /* wait 10 ms */
            SDL_Delay(10);
            continue;
        }
        ret = av_read_frame(ic, pkt);
        if (ret < 0) {
            if (url_ferror(&ic->pb) == 0) {
                SDL_Delay(100); /* wait for user event */
                continue;
            } else
                break;
        }
        if (pkt->stream_index == is->audio_stream) {
            packet_queue_put(&is->audioq, pkt);
        } else if (pkt->stream_index == is->video_stream) {
            packet_queue_put(&is->videoq, pkt);
        } else if (pkt->stream_index == is->subtitle_stream) {
            packet_queue_put(&is->subtitleq, pkt);
        } else {
            av_free_packet(pkt);
        }
    }
    /* wait until the end */
    while (!is->abort_request) {
        SDL_Delay(100);
    }

    ret = 0;
 fail:
    /* disable interrupting */
    global_video_state = NULL;

    /* close each stream */
    if (is->audio_stream >= 0)
        stream_component_close(is, is->audio_stream);
    if (is->video_stream >= 0)
        stream_component_close(is, is->video_stream);
    if (is->subtitle_stream >= 0)
        stream_component_close(is, is->subtitle_stream);
    if (is->ic) {
        av_close_input_file(is->ic);
        is->ic = NULL; /* safety */
    }
    url_set_interrupt_cb(NULL);

    if (ret != 0) {
        SDL_Event event;

        event.type = FF_QUIT_EVENT;
        event.user.data1 = is;
        SDL_PushEvent(&event);
    }
    return 0;
}

static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
{
    VideoState *is;

    is = av_mallocz(sizeof(VideoState));
    if (!is)
        return NULL;
    pstrcpy(is->filename, sizeof(is->filename), filename);
    is->iformat = iformat;
    is->ytop = 0;
    is->xleft = 0;

    /* start video display */
    is->pictq_mutex = SDL_CreateMutex();
    is->pictq_cond = SDL_CreateCond();

    is->subpq_mutex = SDL_CreateMutex();
    is->subpq_cond = SDL_CreateCond();

    /* add the refresh timer to draw the picture */
    schedule_refresh(is, 40);

    is->av_sync_type = av_sync_type;
    is->parse_tid = SDL_CreateThread(decode_thread, is);
    if (!is->parse_tid) {
        av_free(is);
        return NULL;
    }
    return is;
}

static void stream_close(VideoState *is)
{
    VideoPicture *vp;
    int i;
    /* XXX: use a special url_shutdown call to abort parse cleanly */
    is->abort_request = 1;
    SDL_WaitThread(is->parse_tid, NULL);

    /* free all pictures */
    for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
        vp = &is->pictq[i];
        if (vp->bmp) {
            SDL_FreeYUVOverlay(vp->bmp);
            vp->bmp = NULL;
        }
    }
    SDL_DestroyMutex(is->pictq_mutex);
    SDL_DestroyCond(is->pictq_cond);
    SDL_DestroyMutex(is->subpq_mutex);
    SDL_DestroyCond(is->subpq_cond);
}

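/* switch to the next stream of the given type: scan forward from the
   currently open stream (wrapping around), take the first stream whose
   codec type matches and whose parameters look usable, then close the
   old component and open the new one; for subtitles the search may end
   with no stream at all, which simply disables them */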
static void stream_cycle_channel(VideoState *is, int codec_type)
{
    AVFormatContext *ic = is->ic;
    int start_index, stream_index;
    AVStream *st;

    if (codec_type == CODEC_TYPE_VIDEO)
        start_index = is->video_stream;
    else if (codec_type == CODEC_TYPE_AUDIO)
        start_index = is->audio_stream;
    else
        start_index = is->subtitle_stream;
    if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
        return;
    stream_index = start_index;
    for(;;) {
        if (++stream_index >= is->ic->nb_streams)
        {
            if (codec_type == CODEC_TYPE_SUBTITLE)
            {
                stream_index = -1;
                goto the_end;
            } else
                stream_index = 0;
        }
        if (stream_index == start_index)
            return;
        st = ic->streams[stream_index];
        if (st->codec->codec_type == codec_type) {
            /* check that parameters are OK */
            switch(codec_type) {
            case CODEC_TYPE_AUDIO:
                if (st->codec->sample_rate != 0 &&
                    st->codec->channels != 0)
                    goto the_end;
                break;
            case CODEC_TYPE_VIDEO:
            case CODEC_TYPE_SUBTITLE:
                goto the_end;
            default:
                break;
            }
        }
    }
 the_end:
    stream_component_close(is, start_index);
    stream_component_open(is, stream_index);
}


static void toggle_full_screen(void)
{
    is_full_screen = !is_full_screen;
    if (!fs_screen_width) {
        /* use default SDL method */
        SDL_WM_ToggleFullScreen(screen);
    } else {
        /* use the recorded resolution */
        video_open(cur_stream);
    }
}

static void toggle_pause(void)
{
    if (cur_stream)
        stream_pause(cur_stream);
    step = 0;
}

static void step_to_next_frame(void)
{
    if (cur_stream) {
        if (cur_stream->paused)
            cur_stream->paused=0;
        cur_stream->video_current_pts = get_video_clock(cur_stream);
    }
    step = 1;
}

static void do_exit(void)
{
    if (cur_stream) {
        stream_close(cur_stream);
        cur_stream = NULL;
    }
    if (show_status)
        printf("\n");
    SDL_Quit();
    exit(0);
}

static void toggle_audio_display(void)
{
    if (cur_stream) {
        cur_stream->show_audio = !cur_stream->show_audio;
    }
}

/* handle an event sent by the GUI */
static void event_loop(void)
{
    SDL_Event event;
    double incr, pos, frac;

    for(;;) {
        SDL_WaitEvent(&event);
        switch(event.type) {
        case SDL_KEYDOWN:
            switch(event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                do_exit();
                break;
            case SDLK_f:
                toggle_full_screen();
                break;
            case SDLK_p:
            case SDLK_SPACE:
                toggle_pause();
                break;
            case SDLK_s: //S: Step to next frame
                step_to_next_frame();
                break;
            case SDLK_a:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
                break;
            case SDLK_v:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
                break;
            case SDLK_t:
                if (cur_stream)
                    stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
                break;
            case SDLK_w:
                toggle_audio_display();
                break;
            case SDLK_LEFT:
                incr = -10.0;
                goto do_seek;
            case SDLK_RIGHT:
                incr = 10.0;
                goto do_seek;
            case SDLK_UP:
                incr = 60.0;
                goto do_seek;
            case SDLK_DOWN:
                incr = -60.0;
            do_seek:
                if (cur_stream) {
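                    /* -bytes mode seeks on the raw file position, deriving
                       a byte offset from the time increment and the
                       container bit rate (or a fixed fallback when the bit
                       rate is unknown); otherwise seek on the master clock
                       in AV_TIME_BASE units */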
                    if (seek_by_bytes) {
                        pos = url_ftell(&cur_stream->ic->pb);
                        if (cur_stream->ic->bit_rate)
                            incr *= cur_stream->ic->bit_rate / 60.0;
                        else
                            incr *= 180000.0;
                        pos += incr;
                        stream_seek(cur_stream, pos, incr);
                    } else {
                        pos = get_master_clock(cur_stream);
                        pos += incr;
                        stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
                    }
                }
                break;
            default:
                break;
            }
            break;
        case SDL_MOUSEBUTTONDOWN:
            if (cur_stream) {
                int ns, hh, mm, ss;
                int tns, thh, tmm, tss;
                tns = cur_stream->ic->duration/1000000LL;
                thh = tns/3600;
                tmm = (tns%3600)/60;
                tss = (tns%60);
                frac = (double)event.button.x/(double)cur_stream->width;
                ns = frac*tns;
                hh = ns/3600;
                mm = (ns%3600)/60;
                ss = (ns%60);
                fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
                        hh, mm, ss, thh, tmm, tss);
                stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
            }
            break;
        case SDL_VIDEORESIZE:
            if (cur_stream) {
                screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
                                          SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
                cur_stream->width = event.resize.w;
                cur_stream->height = event.resize.h;
            }
            break;
        case SDL_QUIT:
        case FF_QUIT_EVENT:
            do_exit();
            break;
        case FF_ALLOC_EVENT:
            video_open(event.user.data1);
            alloc_picture(event.user.data1);
            break;
        case FF_REFRESH_EVENT:
            video_refresh_timer(event.user.data1);
            break;
        default:
            break;
        }
    }
}

void opt_width(const char *arg)
{
    screen_width = atoi(arg);
}

void opt_height(const char *arg)
{
    screen_height = atoi(arg);
}

static void opt_format(const char *arg)
{
    file_iformat = av_find_input_format(arg);
    if (!file_iformat) {
        fprintf(stderr, "Unknown input format: %s\n", arg);
        exit(1);
    }
}

#ifdef CONFIG_NETWORK
void opt_rtp_tcp(void)
{
    /* only tcp protocol */
    rtsp_default_protocols = (1 << RTSP_PROTOCOL_RTP_TCP);
}
#endif

void opt_sync(const char *arg)
{
    if (!strcmp(arg, "audio"))
        av_sync_type = AV_SYNC_AUDIO_MASTER;
    else if (!strcmp(arg, "video"))
        av_sync_type = AV_SYNC_VIDEO_MASTER;
    else if (!strcmp(arg, "ext"))
        av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
    else
        show_help();
}

void opt_seek(const char *arg)
{
    start_time = parse_date(arg, 1);
}

static void opt_debug(const char *arg)
{
    av_log_set_level(99);
    debug = atoi(arg);
}

static void opt_vismv(const char *arg)
{
    debug_mv = atoi(arg);
}

static void opt_thread_count(const char *arg)
{
    thread_count= atoi(arg);
#if !defined(HAVE_THREADS)
    fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
#endif
}

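/* command line options: each entry maps a flag either directly onto one of
   the globals above (OPT_BOOL/OPT_INT) or onto the opt_* callback that
   parses its argument */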
const OptionDef options[] = {
    { "h", 0, {(void*)show_help}, "show help" },
    { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
    { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
    { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
    { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
    { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
    { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
    { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
    { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
    { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
    { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
    { "debug", HAS_ARG | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
    { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
    { "vismv", HAS_ARG | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
    { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
    { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
    { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
    { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
    { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
    { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
    { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
    { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)",  "threshold" },
    { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
#ifdef CONFIG_NETWORK
    { "rtp_tcp", OPT_EXPERT, {(void*)&opt_rtp_tcp}, "force RTP/TCP protocol usage", "" },
#endif
    { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
    { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
    { NULL, },
};

void show_help(void)
{
    printf("ffplay version " FFMPEG_VERSION ", Copyright (c) 2003-2006 Fabrice Bellard, et al.\n"
           "usage: ffplay [options] input_file\n"
           "Simple media player\n");
    printf("\n");
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT, OPT_EXPERT);
    printf("\nWhile playing:\n"
           "q, ESC              quit\n"
           "f                   toggle full screen\n"
           "p, SPC              pause\n"
           "a                   cycle audio channel\n"
           "v                   cycle video channel\n"
           "t                   cycle subtitle channel\n"
           "w                   show audio waves\n"
           "left/right          seek backward/forward 10 seconds\n"
           "down/up             seek backward/forward 1 minute\n"
           "mouse click         seek to percentage in file corresponding to fraction of width\n"
           );
    exit(1);
}

void parse_arg_file(const char *filename)
{
    if (!strcmp(filename, "-"))
        filename = "pipe:";
    input_filename = filename;
}

/* Called from the main */
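/* startup order: register the (de)muxers, codecs and protocols, parse the
   command line, initialize SDL (audio, video, timer), open the requested
   stream and hand control to event_loop(), which never returns */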
int main(int argc, char **argv)
{
    int flags;

    /* register all codecs, demuxers and protocols */
    av_register_all();

    #ifdef CONFIG_OS2
      MorphToPM(); // Morph the VIO application to a PM one to be able to use Win* functions

      // Make stdout and stderr unbuffered
      setbuf( stdout, NULL );
      setbuf( stderr, NULL );
    #endif

    parse_options(argc, argv, options);

    if (!input_filename)
        show_help();

    if (display_disable) {
        video_disable = 1;
    }
    flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
#if !defined(__MINGW32__) && !defined(CONFIG_DARWIN)
    flags |= SDL_INIT_EVENTTHREAD; /* Not supported on win32 or darwin */
#endif
    if (SDL_Init (flags)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    if (!display_disable) {
#ifdef HAVE_SDL_VIDEO_SIZE
        const SDL_VideoInfo *vi = SDL_GetVideoInfo();
        fs_screen_width = vi->current_w;
        fs_screen_height = vi->current_h;
#endif
    }

    SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
    SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
    SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
    SDL_EventState(SDL_USEREVENT, SDL_IGNORE);

    av_init_packet(&flush_pkt);
    flush_pkt.data= "FLUSH";

    cur_stream = stream_open(input_filename, file_iformat);

    event_loop();

    /* never returns */

    return 0;
}