1
/*
2
 * The simplest mpeg encoder (well, it was the simplest!)
3
 * Copyright (c) 2000,2001 Fabrice Bellard.
4
 *
5
 * This library is free software; you can redistribute it and/or
6
 * modify it under the terms of the GNU Lesser General Public
7
 * License as published by the Free Software Foundation; either
8
 * version 2 of the License, or (at your option) any later version.
9
 *
10
 * This library is distributed in the hope that it will be useful,
11
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13
 * Lesser General Public License for more details.
14
 *
15
 * You should have received a copy of the GNU Lesser General Public
16
 * License along with this library; if not, write to the Free Software
17
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
18
 *
19
 * 4MV & hq & b-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
20
 */
21
 
22
/**
23
 * @file mpegvideo.c
24
 * The simplest mpeg encoder (well, it was the simplest!).
25
 */ 
26
 
27
#include <limits.h>
28
#include "avcodec.h"
29
#include "dsputil.h"
30
#include "mpegvideo.h"
31
#include "faandct.h"
32

    
33
#ifdef USE_FASTMEMCPY
34
#include "fastmemcpy.h"
35
#endif
36

    
37
//#undef NDEBUG
38
//#include <assert.h>
39

    
40
#ifdef CONFIG_ENCODERS
41
static void encode_picture(MpegEncContext *s, int picture_number);
42
#endif //CONFIG_ENCODERS
43
static void dct_unquantize_mpeg1_c(MpegEncContext *s, 
44
                                   DCTELEM *block, int n, int qscale);
45
static void dct_unquantize_mpeg2_c(MpegEncContext *s,
46
                                   DCTELEM *block, int n, int qscale);
47
static void dct_unquantize_h263_c(MpegEncContext *s, 
48
                                  DCTELEM *block, int n, int qscale);
49
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w);
50
#ifdef CONFIG_ENCODERS
51
static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
52
static int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
53
static int sse_mb(MpegEncContext *s);
54
#endif //CONFIG_ENCODERS
55

    
56
#ifdef HAVE_XVMC
57
extern int  XVMC_field_start(MpegEncContext*s, AVCodecContext *avctx);
58
extern void XVMC_field_end(MpegEncContext *s);
59
extern void XVMC_decode_mb(MpegEncContext *s, DCTELEM block[6][64]);
60
#endif
61

    
62
void (*draw_edges)(uint8_t *buf, int wrap, int width, int height, int w)= draw_edges_c;
63

    
64

    
65
/* enable all paranoid tests for rounding, overflows, etc... */
66
//#define PARANOID
67

    
68
//#define DEBUG
69

    
70

    
71
/* for jpeg fast DCT */
72
#define CONST_BITS 14
73

    
74
static const uint16_t aanscales[64] = {
75
    /* precomputed values scaled up by 14 bits */
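    /* aanscales[8*u + v] = s(u)*s(v)*2^14 with s(0)=1 and s(k)=sqrt(2)*cos(k*pi/16),
       i.e. the AAN post-scale factors in fixed point (e.g. 22725 ~= 1.387*16384) */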
76
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
77
    22725, 31521, 29692, 26722, 22725, 17855, 12299,  6270,
78
    21407, 29692, 27969, 25172, 21407, 16819, 11585,  5906,
79
    19266, 26722, 25172, 22654, 19266, 15137, 10426,  5315,
80
    16384, 22725, 21407, 19266, 16384, 12873,  8867,  4520,
81
    12873, 17855, 16819, 15137, 12873, 10114,  6967,  3552,
82
    8867 , 12299, 11585, 10426,  8867,  6967,  4799,  2446,
83
    4520 ,  6270,  5906,  5315,  4520,  3552,  2446,  1247
84
};
85

    
86
static const uint8_t h263_chroma_roundtab[16] = {
87
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15
88
    0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2,
89
};
90

    
91
#ifdef CONFIG_ENCODERS
92
static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL;
93
static uint8_t default_fcode_tab[MAX_MV*2+1];
94

    
95
enum PixelFormat ff_yuv420p_list[2]= {PIX_FMT_YUV420P, -1};
96

    
97
static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64],
98
                           const uint16_t *quant_matrix, int bias, int qmin, int qmax)
99
{
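    /* builds the quantizer reciprocal tables; the fdct_ifast/FAAN branch also
       divides out the AAN post-scale factors (aanscales[]), which that DCT
       leaves in its output, while the generic branch also fills the 16-bit tables */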
100
    int qscale;
101

    
102
    for(qscale=qmin; qscale<=qmax; qscale++){
103
        int i;
104
        if (dsp->fdct == ff_jpeg_fdct_islow 
105
#ifdef FAAN_POSTSCALE
106
            || dsp->fdct == ff_faandct
107
#endif
108
            ) {
109
            for(i=0;i<64;i++) {
110
                const int j= dsp->idct_permutation[i];
111
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
112
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
113
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
114
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
115
                
116
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / 
117
                                (qscale * quant_matrix[j]));
118
            }
119
        } else if (dsp->fdct == fdct_ifast
120
#ifndef FAAN_POSTSCALE
121
                   || dsp->fdct == ff_faandct
122
#endif
123
                   ) {
124
            for(i=0;i<64;i++) {
125
                const int j= dsp->idct_permutation[i];
126
                /* 16 <= qscale * quant_matrix[i] <= 7905 */
127
                /* 19952         <= aanscales[i] * qscale * quant_matrix[i]           <= 249205026 */
128
                /* (1<<36)/19952 >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= (1<<36)/249205026 */
129
                /* 3444240       >= (1<<36)/(aanscales[i] * qscale * quant_matrix[i]) >= 275 */
130
                
131
                qmat[qscale][i] = (int)((uint64_t_C(1) << (QMAT_SHIFT + 14)) / 
132
                                (aanscales[i] * qscale * quant_matrix[j]));
133
            }
134
        } else {
135
            for(i=0;i<64;i++) {
136
                const int j= dsp->idct_permutation[i];
137
                /* We can safely suppose that 16 <= quant_matrix[i] <= 255
138
                   So 16           <= qscale * quant_matrix[i]             <= 7905
139
                   so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905
140
                   so 32768        >= (1<<19) / (qscale * quant_matrix[i]) >= 67
141
                */
142
                qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[j]));
143
//                qmat  [qscale][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]);
144
                qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[j]);
145

    
146
                if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1;
147
                qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]);
148
            }
149
        }
150
    }
151
}
152

    
153
static inline void update_qscale(MpegEncContext *s){
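    /* lambda -> qscale: 139/2^14 is roughly 1/FF_QP2LAMBDA, so this is approximately
       qscale = lambda/FF_QP2LAMBDA with rounding, clipped to [qmin, qmax] below */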
154
    s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
155
    s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax);
156
    
157
    s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
158
}
159
#endif //CONFIG_ENCODERS
160

    
161
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable){
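    /* permutated[] is the scan order remapped through the IDCT permutation,
       raster_end[i] is the highest permutated index within the first i+1 scan
       positions, and inverse[] (PPC only) maps raster positions to scan indices */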
162
    int i;
163
    int end;
164
    
165
    st->scantable= src_scantable;
166

    
167
    for(i=0; i<64; i++){
168
        int j;
169
        j = src_scantable[i];
170
        st->permutated[i] = permutation[j];
171
#ifdef ARCH_POWERPC
172
        st->inverse[j] = i;
173
#endif
174
    }
175
    
176
    end=-1;
177
    for(i=0; i<64; i++){
178
        int j;
179
        j = st->permutated[i];
180
        if(j>end) end=j;
181
        st->raster_end[i]= end;
182
    }
183
}
184

    
185
#ifdef CONFIG_ENCODERS
186
void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix){
187
    int i;
188

    
189
    if(matrix){
190
        put_bits(pb, 1, 1);
191
        for(i=0;i<64;i++) {
192
            put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]);
193
        }
194
    }else
195
        put_bits(pb, 1, 0);
196
}
197
#endif //CONFIG_ENCODERS
198

    
199
/* init common dct for both encoder and decoder */
200
int DCT_common_init(MpegEncContext *s)
201
{
202
    s->dct_unquantize_h263 = dct_unquantize_h263_c;
203
    s->dct_unquantize_mpeg1 = dct_unquantize_mpeg1_c;
204
    s->dct_unquantize_mpeg2 = dct_unquantize_mpeg2_c;
205

    
206
#ifdef CONFIG_ENCODERS
207
    s->dct_quantize= dct_quantize_c;
208
#endif
209
        
210
#ifdef HAVE_MMX
211
    MPV_common_init_mmx(s);
212
#endif
213
#ifdef ARCH_ALPHA
214
    MPV_common_init_axp(s);
215
#endif
216
#ifdef HAVE_MLIB
217
    MPV_common_init_mlib(s);
218
#endif
219
#ifdef HAVE_MMI
220
    MPV_common_init_mmi(s);
221
#endif
222
#ifdef ARCH_ARMV4L
223
    MPV_common_init_armv4l(s);
224
#endif
225
#ifdef ARCH_POWERPC
226
    MPV_common_init_ppc(s);
227
#endif
228

    
229
#ifdef CONFIG_ENCODERS
230
    s->fast_dct_quantize= s->dct_quantize;
231

    
232
    if(s->flags&CODEC_FLAG_TRELLIS_QUANT){
233
        s->dct_quantize= dct_quantize_trellis_c; //move before MPV_common_init_*
234
    }
235

    
236
#endif //CONFIG_ENCODERS
237

    
238
    /* load & permutate scantables
239
       note: only wmv uses different ones 
240
    */
241
    ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
242
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
243
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
244
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
245

    
246
    s->picture_structure= PICT_FRAME;
247
    
248
    return 0;
249
}
250

    
251
static void copy_picture(Picture *dst, Picture *src){
252
    *dst = *src;
253
    dst->type= FF_BUFFER_TYPE_COPY;
254
}
255

    
256
/**
257
 * allocates a Picture
258
 * The pixels are allocated/set by calling get_buffer() if shared=0
259
 */
260
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
261
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesn't sig11
262
    const int mb_array_size= s->mb_stride*s->mb_height;
263
    int i;
264
    
265
    if(shared){
266
        assert(pic->data[0]);
267
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268
        pic->type= FF_BUFFER_TYPE_SHARED;
269
    }else{
270
        int r;
271
        
272
        assert(!pic->data[0]);
273
        
274
        r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
275
        
276
        if(r<0 || !pic->age || !pic->type || !pic->data[0]){
277
            fprintf(stderr, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
278
            return -1;
279
        }
280

    
281
        if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){
282
            fprintf(stderr, "get_buffer() failed (stride changed)\n");
283
            return -1;
284
        }
285

    
286
        if(pic->linesize[1] != pic->linesize[2]){
287
            fprintf(stderr, "get_buffer() failed (uv stride missmatch)\n");
288
            return -1;
289
        }
290

    
291
        s->linesize  = pic->linesize[0];
292
        s->uvlinesize= pic->linesize[1];
293
    }
294
    
295
    if(pic->qscale_table==NULL){
296
        if (s->encoding) {        
297
            CHECKED_ALLOCZ(pic->mb_var   , mb_array_size * sizeof(int16_t))
298
            CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t))
299
            CHECKED_ALLOCZ(pic->mb_mean  , mb_array_size * sizeof(int8_t))
300
            CHECKED_ALLOCZ(pic->mb_cmp_score, mb_array_size * sizeof(int32_t))
301
        }
302

    
303
        CHECKED_ALLOCZ(pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2) //the +2 is for the slice end check
304
        CHECKED_ALLOCZ(pic->qscale_table , mb_array_size * sizeof(uint8_t))
305
        CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num    * sizeof(int))
306
        pic->mb_type= pic->mb_type_base + s->mb_stride+1;
307
        if(s->out_format == FMT_H264){
308
            for(i=0; i<2; i++){
309
                CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
310
                CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
311
            }
312
        }
313
        pic->qstride= s->mb_stride;
314
        CHECKED_ALLOCZ(pic->pan_scan , 1 * sizeof(AVPanScan))
315
    }
316

    
317
    //it might be nicer if the application would keep track of these but it would require an API change
318
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
319
    s->prev_pict_types[0]= s->pict_type;
320
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE)
321
        pic->age= INT_MAX; // skipped MBs in B-frames are quite rare in mpeg1/2 and it's a bit tricky to skip them anyway
322
    
323
    return 0;
324
fail: //for the CHECKED_ALLOCZ macro
325
    return -1;
326
}
327

    
328
/**
329
 * deallocates a picture
330
 */
331
static void free_picture(MpegEncContext *s, Picture *pic){
332
    int i;
333

    
334
    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
335
        s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
336
    }
337

    
338
    av_freep(&pic->mb_var);
339
    av_freep(&pic->mc_mb_var);
340
    av_freep(&pic->mb_mean);
341
    av_freep(&pic->mb_cmp_score);
342
    av_freep(&pic->mbskip_table);
343
    av_freep(&pic->qscale_table);
344
    av_freep(&pic->mb_type_base);
345
    av_freep(&pic->pan_scan);
346
    pic->mb_type= NULL;
347
    for(i=0; i<2; i++){
348
        av_freep(&pic->motion_val[i]);
349
        av_freep(&pic->ref_index[i]);
350
    }
351
    
352
    if(pic->type == FF_BUFFER_TYPE_SHARED){
353
        for(i=0; i<4; i++){
354
            pic->base[i]=
355
            pic->data[i]= NULL;
356
        }
357
        pic->type= 0;        
358
    }
359
}
360

    
361
/* init common structure for both encoder and decoder */
362
int MPV_common_init(MpegEncContext *s)
363
{
364
    int y_size, c_size, yc_size, i, mb_array_size, x, y;
365

    
366
    dsputil_init(&s->dsp, s->avctx);
367
    DCT_common_init(s);
368

    
369
    s->flags= s->avctx->flags;
370

    
371
    s->mb_width  = (s->width  + 15) / 16;
372
    s->mb_height = (s->height + 15) / 16;
373
    s->mb_stride = s->mb_width + 1;
374
    mb_array_size= s->mb_height * s->mb_stride;
375

    
376
    /* set default edge pos, will be overridden in decode_header if needed */
377
    s->h_edge_pos= s->mb_width*16;
378
    s->v_edge_pos= s->mb_height*16;
379

    
380
    s->mb_num = s->mb_width * s->mb_height;
381
    
382
    s->block_wrap[0]=
383
    s->block_wrap[1]=
384
    s->block_wrap[2]=
385
    s->block_wrap[3]= s->mb_width*2 + 2;
386
    s->block_wrap[4]=
387
    s->block_wrap[5]= s->mb_width + 2;
388

    
389
    y_size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
390
    c_size = (s->mb_width + 2) * (s->mb_height + 2);
391
    yc_size = y_size + 2 * c_size;
392

    
393
    /* convert fourcc to upper case */
394
    s->avctx->codec_tag=   toupper( s->avctx->codec_tag     &0xFF)          
395
                        + (toupper((s->avctx->codec_tag>>8 )&0xFF)<<8 )
396
                        + (toupper((s->avctx->codec_tag>>16)&0xFF)<<16) 
397
                        + (toupper((s->avctx->codec_tag>>24)&0xFF)<<24);
398

    
399
    s->avctx->stream_codec_tag=   toupper( s->avctx->stream_codec_tag     &0xFF)          
400
                               + (toupper((s->avctx->stream_codec_tag>>8 )&0xFF)<<8 )
401
                               + (toupper((s->avctx->stream_codec_tag>>16)&0xFF)<<16) 
402
                               + (toupper((s->avctx->stream_codec_tag>>24)&0xFF)<<24);
403

    
404
    CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); //(width + edge + align)*interlaced*MBsize*tolerance
405
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17;
406

    
407
    s->avctx->coded_frame= (AVFrame*)&s->current_picture;
408

    
409
    CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) //error resilience code looks cleaner with this
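    /* mb_index2xy[] maps a raster-order macroblock number (0..mb_num-1) to its
       offset in the padded mb_stride grid used by the per-MB tables */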
410
    for(y=0; y<s->mb_height; y++){
411
        for(x=0; x<s->mb_width; x++){
412
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
413
        }
414
    }
415
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
416
    
417
    if (s->encoding) {
418
        int mv_table_size= s->mb_stride * (s->mb_height+2) + 1;
419

    
420
        /* Allocate MV tables */
421
        CHECKED_ALLOCZ(s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t))
422
        CHECKED_ALLOCZ(s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
423
        CHECKED_ALLOCZ(s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t))
424
        CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
425
        CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t))
426
        CHECKED_ALLOCZ(s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t))
427
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
428
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
429
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
430
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
431
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
432
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;
433

    
434
        //FIXME should be linesize instead of s->width*2 but that isn't known before get_buffer()
435
        CHECKED_ALLOCZ(s->me.scratchpad,  s->width*2*16*3*sizeof(uint8_t)) 
436
        
437
        CHECKED_ALLOCZ(s->me.map      , ME_MAP_SIZE*sizeof(uint32_t))
438
        CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t))
439

    
440
        if(s->codec_id==CODEC_ID_MPEG4){
441
            CHECKED_ALLOCZ(s->tex_pb_buffer, PB_BUFFER_SIZE);
442
            CHECKED_ALLOCZ(   s->pb2_buffer, PB_BUFFER_SIZE);
443
        }
444
        
445
        if(s->msmpeg4_version){
446
            CHECKED_ALLOCZ(s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int));
447
        }
448
        CHECKED_ALLOCZ(s->avctx->stats_out, 256);
449

    
450
        /* Allocate MB type table */
451
        CHECKED_ALLOCZ(s->mb_type  , mb_array_size * sizeof(uint8_t)) //needed for encoding
452
        
453
        CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int))
454
        
455
        CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int))
456
        CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int))
457
        CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t))
458
        CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t))
459
        CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
460
        CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*))
461
    }
462
    CHECKED_ALLOCZ(s->blocks, 64*6*2 * sizeof(DCTELEM))
463
        
464
    CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture))
465

    
466
    CHECKED_ALLOCZ(s->error_status_table, mb_array_size*sizeof(uint8_t))
467
    
468
    if (s->out_format == FMT_H263 || s->encoding) {
469
        int size;
470

    
471
        /* MV prediction */
472
        size = (2 * s->mb_width + 2) * (2 * s->mb_height + 2);
473
        CHECKED_ALLOCZ(s->motion_val, size * 2 * sizeof(int16_t));
474
    }
475

    
476
    if(s->codec_id==CODEC_ID_MPEG4){
477
        /* interlaced direct mode decoding tables */
478
        CHECKED_ALLOCZ(s->field_mv_table, mb_array_size*2*2 * sizeof(int16_t))
479
        CHECKED_ALLOCZ(s->field_select_table, mb_array_size*2* sizeof(int8_t))
480
    }
481
    if (s->out_format == FMT_H263) {
482
        /* ac values */
483
        CHECKED_ALLOCZ(s->ac_val[0], yc_size * sizeof(int16_t) * 16);
484
        s->ac_val[1] = s->ac_val[0] + y_size;
485
        s->ac_val[2] = s->ac_val[1] + c_size;
486
        
487
        /* cbp values */
488
        CHECKED_ALLOCZ(s->coded_block, y_size);
489
        
490
        /* divx501 bitstream reorder buffer */
491
        CHECKED_ALLOCZ(s->bitstream_buffer, BITSTREAM_BUFFER_SIZE);
492

    
493
        /* cbp, ac_pred, pred_dir */
494
        CHECKED_ALLOCZ(s->cbp_table  , mb_array_size * sizeof(uint8_t))
495
        CHECKED_ALLOCZ(s->pred_dir_table, mb_array_size * sizeof(uint8_t))
496
    }
497
    
498
    if (s->h263_pred || s->h263_plus || !s->encoding) {
499
        /* dc values */
500
        //MN: we need these for error resilience of intra-frames
501
        CHECKED_ALLOCZ(s->dc_val[0], yc_size * sizeof(int16_t));
502
        s->dc_val[1] = s->dc_val[0] + y_size;
503
        s->dc_val[2] = s->dc_val[1] + c_size;
504
        for(i=0;i<yc_size;i++)
505
            s->dc_val[0][i] = 1024;
506
    }
507

    
508
    /* which mb is a intra block */
509
    CHECKED_ALLOCZ(s->mbintra_table, mb_array_size);
510
    memset(s->mbintra_table, 1, mb_array_size);
511
    
512
    /* default structure is frame */
513
    s->picture_structure = PICT_FRAME;
514
    
515
    /* init macroblock skip table */
516
    CHECKED_ALLOCZ(s->mbskip_table, mb_array_size+2);
517
    //Note the +1 is for a quicker mpeg4 slice_end detection
518
    CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
519
    
520
    s->block= s->blocks[0];
521

    
522
    s->parse_context.state= -1;
523

    
524
    s->context_initialized = 1;
525
    return 0;
526
 fail:
527
    MPV_common_end(s);
528
    return -1;
529
}
530

    
531

    
532
//extern int sads;
533

    
534
/* init common structure for both encoder and decoder */
535
void MPV_common_end(MpegEncContext *s)
536
{
537
    int i;
538

    
539
    av_freep(&s->parse_context.buffer);
540
    s->parse_context.buffer_size=0;
541

    
542
    av_freep(&s->mb_type);
543
    av_freep(&s->p_mv_table_base);
544
    av_freep(&s->b_forw_mv_table_base);
545
    av_freep(&s->b_back_mv_table_base);
546
    av_freep(&s->b_bidir_forw_mv_table_base);
547
    av_freep(&s->b_bidir_back_mv_table_base);
548
    av_freep(&s->b_direct_mv_table_base);
549
    s->p_mv_table= NULL;
550
    s->b_forw_mv_table= NULL;
551
    s->b_back_mv_table= NULL;
552
    s->b_bidir_forw_mv_table= NULL;
553
    s->b_bidir_back_mv_table= NULL;
554
    s->b_direct_mv_table= NULL;
555
    
556
    av_freep(&s->motion_val);
557
    av_freep(&s->dc_val[0]);
558
    av_freep(&s->ac_val[0]);
559
    av_freep(&s->coded_block);
560
    av_freep(&s->mbintra_table);
561
    av_freep(&s->cbp_table);
562
    av_freep(&s->pred_dir_table);
563
    av_freep(&s->me.scratchpad);
564
    av_freep(&s->me.map);
565
    av_freep(&s->me.score_map);
566
    
567
    av_freep(&s->mbskip_table);
568
    av_freep(&s->prev_pict_types);
569
    av_freep(&s->bitstream_buffer);
570
    av_freep(&s->tex_pb_buffer);
571
    av_freep(&s->pb2_buffer);
572
    av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
573
    av_freep(&s->field_mv_table);
574
    av_freep(&s->field_select_table);
575
    av_freep(&s->avctx->stats_out);
576
    av_freep(&s->ac_stats);
577
    av_freep(&s->error_status_table);
578
    av_freep(&s->mb_index2xy);
579
    av_freep(&s->lambda_table);
580
    av_freep(&s->q_intra_matrix);
581
    av_freep(&s->q_inter_matrix);
582
    av_freep(&s->q_intra_matrix16);
583
    av_freep(&s->q_inter_matrix16);
584
    av_freep(&s->blocks);
585
    av_freep(&s->input_picture);
586
    av_freep(&s->reordered_input_picture);
587

    
588
    if(s->picture){
589
        for(i=0; i<MAX_PICTURE_COUNT; i++){
590
            free_picture(s, &s->picture[i]);
591
        }
592
    }
593
    av_freep(&s->picture);
594
    avcodec_default_free_buffers(s->avctx);
595
    s->context_initialized = 0;
596
    s->last_picture_ptr=
597
    s->next_picture_ptr=
598
    s->current_picture_ptr= NULL;
599
}
600

    
601
#ifdef CONFIG_ENCODERS
602

    
603
/* init video encoder */
604
int MPV_encode_init(AVCodecContext *avctx)
605
{
606
    MpegEncContext *s = avctx->priv_data;
607
    int i, dummy;
608
    int chroma_h_shift, chroma_v_shift;
609

    
610
    avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
611

    
612
    s->bit_rate = avctx->bit_rate;
613
    s->bit_rate_tolerance = avctx->bit_rate_tolerance;
614
    s->width = avctx->width;
615
    s->height = avctx->height;
616
    if(avctx->gop_size > 600){
617
        fprintf(stderr, "Warning keyframe interval too large! reducing it ...\n");
618
        avctx->gop_size=600;
619
    }
620
    s->gop_size = avctx->gop_size;
621
    s->rtp_mode = avctx->rtp_mode;
622
    s->rtp_payload_size = avctx->rtp_payload_size;
623
    if (avctx->rtp_callback)
624
        s->rtp_callback = avctx->rtp_callback;
625
    s->max_qdiff= avctx->max_qdiff;
626
    s->qcompress= avctx->qcompress;
627
    s->qblur= avctx->qblur;
628
    s->avctx = avctx;
629
    s->flags= avctx->flags;
630
    s->max_b_frames= avctx->max_b_frames;
631
    s->b_frame_strategy= avctx->b_frame_strategy;
632
    s->codec_id= avctx->codec->id;
633
    s->luma_elim_threshold  = avctx->luma_elim_threshold;
634
    s->chroma_elim_threshold= avctx->chroma_elim_threshold;
635
    s->strict_std_compliance= avctx->strict_std_compliance;
636
    s->data_partitioning= avctx->flags & CODEC_FLAG_PART;
637
    s->quarter_sample= (avctx->flags & CODEC_FLAG_QPEL)!=0;
638
    s->mpeg_quant= avctx->mpeg_quant;
639

    
640
    if (s->gop_size <= 1) {
641
        s->intra_only = 1;
642
        s->gop_size = 12;
643
    } else {
644
        s->intra_only = 0;
645
    }
646

    
647
    s->me_method = avctx->me_method;
648

    
649
    /* Fixed QSCALE */
650
    s->fixed_qscale = (avctx->flags & CODEC_FLAG_QSCALE);
651
    
652
    s->adaptive_quant= (   s->avctx->lumi_masking
653
                        || s->avctx->dark_masking
654
                        || s->avctx->temporal_cplx_masking 
655
                        || s->avctx->spatial_cplx_masking
656
                        || s->avctx->p_masking)
657
                       && !s->fixed_qscale;
658
    
659
    s->progressive_sequence= !(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
660

    
661
    if((s->flags & CODEC_FLAG_4MV) && s->codec_id != CODEC_ID_MPEG4){
662
        fprintf(stderr, "4MV not supporetd by codec\n");
663
        return -1;
664
    }
665
    
666
    if(s->quarter_sample && s->codec_id != CODEC_ID_MPEG4){
667
        fprintf(stderr, "qpel not supporetd by codec\n");
668
        return -1;
669
    }
670

    
671
    if(s->data_partitioning && s->codec_id != CODEC_ID_MPEG4){
672
        fprintf(stderr, "data partitioning not supporetd by codec\n");
673
        return -1;
674
    }
675
    
676
    if(s->max_b_frames && s->codec_id != CODEC_ID_MPEG4 && s->codec_id != CODEC_ID_MPEG1VIDEO && s->codec_id != CODEC_ID_MPEG2VIDEO){
677
        fprintf(stderr, "b frames not supporetd by codec\n");
678
        return -1;
679
    }
680
    
681
    if(s->mpeg_quant && s->codec_id != CODEC_ID_MPEG4){ //FIXME mpeg2 uses that too
682
        fprintf(stderr, "mpeg2 style quantization not supporetd by codec\n");
683
        return -1;
684
    }
685
        
686
    if((s->flags & CODEC_FLAG_CBP_RD) && !(s->flags & CODEC_FLAG_TRELLIS_QUANT)){
687
        fprintf(stderr, "CBP RD needs trellis quant\n");
688
        return -1;
689
    }
690

    
691
    if(s->codec_id==CODEC_ID_MJPEG){
692
        s->intra_quant_bias= 1<<(QUANT_BIAS_SHIFT-1); //(a + x/2)/x
693
        s->inter_quant_bias= 0;
694
    }else if(s->mpeg_quant || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO){
695
        s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); //(a + x*3/8)/x
696
        s->inter_quant_bias= 0;
697
    }else{
698
        s->intra_quant_bias=0;
699
        s->inter_quant_bias=-(1<<(QUANT_BIAS_SHIFT-2)); //(a - x/4)/x
700
    }
701
    
702
    if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
703
        s->intra_quant_bias= avctx->intra_quant_bias;
704
    if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
705
        s->inter_quant_bias= avctx->inter_quant_bias;
706
        
707
    avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift);
708

    
709
    av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1);
710
    s->time_increment_bits = av_log2(s->time_increment_resolution - 1) + 1;
711

    
712
    switch(avctx->codec->id) {
713
    case CODEC_ID_MPEG1VIDEO:
714
        s->out_format = FMT_MPEG1;
715
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
716
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
717
        break;
718
    case CODEC_ID_MPEG2VIDEO:
719
        s->out_format = FMT_MPEG1;
720
        s->low_delay= 0; //s->max_b_frames ? 0 : 1;
721
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
722
        s->rtp_mode= 1; // mpeg2 must have slices
723
        if(s->rtp_payload_size == 0) s->rtp_payload_size= 256*256*256;
724
        break;
725
    case CODEC_ID_LJPEG:
726
    case CODEC_ID_MJPEG:
727
        s->out_format = FMT_MJPEG;
728
        s->intra_only = 1; /* force intra only for jpeg */
729
        s->mjpeg_write_tables = 1; /* write all tables */
730
        s->mjpeg_data_only_frames = 0; /* write all the needed headers */
731
        s->mjpeg_vsample[0] = 1<<chroma_v_shift;
732
        s->mjpeg_vsample[1] = 1;
733
        s->mjpeg_vsample[2] = 1; 
734
        s->mjpeg_hsample[0] = 1<<chroma_h_shift;
735
        s->mjpeg_hsample[1] = 1; 
736
        s->mjpeg_hsample[2] = 1; 
737
        if (mjpeg_init(s) < 0)
738
            return -1;
739
        avctx->delay=0;
740
        s->low_delay=1;
741
        break;
742
#ifdef CONFIG_RISKY
743
    case CODEC_ID_H263:
744
        if (h263_get_picture_format(s->width, s->height) == 7) {
745
            printf("Input picture size isn't suitable for h263 codec! try h263+\n");
746
            return -1;
747
        }
748
        s->out_format = FMT_H263;
749
        avctx->delay=0;
750
        s->low_delay=1;
751
        break;
752
    case CODEC_ID_H263P:
753
        s->out_format = FMT_H263;
754
        s->h263_plus = 1;
755
        /* Fx */
756
        s->unrestricted_mv=(avctx->flags & CODEC_FLAG_H263P_UMV) ? 1:0;
757
        s->h263_aic= (avctx->flags & CODEC_FLAG_H263P_AIC) ? 1:0;
758
        /* /Fx */
759
        /* These are just to be sure */
760
        s->umvplus = 1;
761
        avctx->delay=0;
762
        s->low_delay=1;
763
        break;
764
    case CODEC_ID_FLV1:
765
        s->out_format = FMT_H263;
766
        s->h263_flv = 2; /* format = 1; 11-bit codes */
767
        s->unrestricted_mv = 1;
768
        s->rtp_mode=0; /* don't allow GOB */
769
        avctx->delay=0;
770
        s->low_delay=1;
771
        break;
772
    case CODEC_ID_RV10:
773
        s->out_format = FMT_H263;
774
        s->h263_rv10 = 1;
775
        avctx->delay=0;
776
        s->low_delay=1;
777
        break;
778
    case CODEC_ID_MPEG4:
779
        s->out_format = FMT_H263;
780
        s->h263_pred = 1;
781
        s->unrestricted_mv = 1;
782
        s->low_delay= s->max_b_frames ? 0 : 1;
783
        avctx->delay= s->low_delay ? 0 : (s->max_b_frames + 1);
784
        break;
785
    case CODEC_ID_MSMPEG4V1:
786
        s->out_format = FMT_H263;
787
        s->h263_msmpeg4 = 1;
788
        s->h263_pred = 1;
789
        s->unrestricted_mv = 1;
790
        s->msmpeg4_version= 1;
791
        avctx->delay=0;
792
        s->low_delay=1;
793
        break;
794
    case CODEC_ID_MSMPEG4V2:
795
        s->out_format = FMT_H263;
796
        s->h263_msmpeg4 = 1;
797
        s->h263_pred = 1;
798
        s->unrestricted_mv = 1;
799
        s->msmpeg4_version= 2;
800
        avctx->delay=0;
801
        s->low_delay=1;
802
        break;
803
    case CODEC_ID_MSMPEG4V3:
804
        s->out_format = FMT_H263;
805
        s->h263_msmpeg4 = 1;
806
        s->h263_pred = 1;
807
        s->unrestricted_mv = 1;
808
        s->msmpeg4_version= 3;
809
        s->flipflop_rounding=1;
810
        avctx->delay=0;
811
        s->low_delay=1;
812
        break;
813
    case CODEC_ID_WMV1:
814
        s->out_format = FMT_H263;
815
        s->h263_msmpeg4 = 1;
816
        s->h263_pred = 1;
817
        s->unrestricted_mv = 1;
818
        s->msmpeg4_version= 4;
819
        s->flipflop_rounding=1;
820
        avctx->delay=0;
821
        s->low_delay=1;
822
        break;
823
    case CODEC_ID_WMV2:
824
        s->out_format = FMT_H263;
825
        s->h263_msmpeg4 = 1;
826
        s->h263_pred = 1;
827
        s->unrestricted_mv = 1;
828
        s->msmpeg4_version= 5;
829
        s->flipflop_rounding=1;
830
        avctx->delay=0;
831
        s->low_delay=1;
832
        break;
833
#endif
834
    default:
835
        return -1;
836
    }
837
    
838
    { /* set up some safe defaults, some codecs might override them later */
839
        static int done=0;
840
        if(!done){
841
            int i;
842
            done=1;
843

    
844
            default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) );
845
            memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1));
846
            memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1));
847

    
848
            for(i=-16; i<16; i++){
849
                default_fcode_tab[i + MAX_MV]= 1;
850
            }
851
        }
852
    }
853
    s->me.mv_penalty= default_mv_penalty;
854
    s->fcode_tab= default_fcode_tab;
855
    s->y_dc_scale_table=
856
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
857
 
858
    /* don't use the mv_penalty table for crap MVs as it would be confused */
859
    //FIXME remove after fixing / removing old ME
860
    if (s->me_method < ME_EPZS) s->me.mv_penalty = default_mv_penalty;
861

    
862
    s->encoding = 1;
863

    
864
    /* init */
865
    if (MPV_common_init(s) < 0)
866
        return -1;
867
    
868
    ff_init_me(s);
869

    
870
#ifdef CONFIG_ENCODERS
871
#ifdef CONFIG_RISKY
872
    if (s->out_format == FMT_H263)
873
        h263_encode_init(s);
874
    if(s->msmpeg4_version)
875
        ff_msmpeg4_encode_init(s);
876
#endif
877
    if (s->out_format == FMT_MPEG1)
878
        ff_mpeg1_encode_init(s);
879
#endif
880

    
881
    /* init default q matrix */
882
    for(i=0;i<64;i++) {
883
        int j= s->dsp.idct_permutation[i];
884
#ifdef CONFIG_RISKY
885
        if(s->codec_id==CODEC_ID_MPEG4 && s->mpeg_quant){
886
            s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
887
            s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
888
        }else if(s->out_format == FMT_H263){
889
            s->intra_matrix[j] =
890
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
891
        }else
892
#endif
893
        { /* mpeg1/2 */
894
            s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
895
            s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
896
        }
897
        if(s->avctx->intra_matrix)
898
            s->intra_matrix[j] = s->avctx->intra_matrix[i];
899
        if(s->avctx->inter_matrix)
900
            s->inter_matrix[j] = s->avctx->inter_matrix[i];
901
    }
902

    
903
    /* precompute matrix */
904
    /* for mjpeg, we do include qscale in the matrix */
905
    if (s->out_format != FMT_MJPEG) {
906
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, 
907
                       s->intra_matrix, s->intra_quant_bias, 1, 31);
908
        convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, 
909
                       s->inter_matrix, s->inter_quant_bias, 1, 31);
910
    }
911

    
912
    if(ff_rate_control_init(s) < 0)
913
        return -1;
914

    
915
    s->picture_number = 0;
916
    s->picture_in_gop_number = 0;
917
    s->fake_picture_number = 0;
918
    /* motion detector init */
919
    s->f_code = 1;
920
    s->b_code = 1;
921

    
922
    return 0;
923
}
924

    
925
int MPV_encode_end(AVCodecContext *avctx)
926
{
927
    MpegEncContext *s = avctx->priv_data;
928

    
929
#ifdef STATS
930
    print_stats();
931
#endif
932

    
933
    ff_rate_control_uninit(s);
934

    
935
    MPV_common_end(s);
936
    if (s->out_format == FMT_MJPEG)
937
        mjpeg_close(s);
938

    
939
    av_freep(&avctx->extradata);
940
      
941
    return 0;
942
}
943

    
944
#endif //CONFIG_ENCODERS
945

    
946
void init_rl(RLTable *rl)
947
{
948
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
949
    uint8_t index_run[MAX_RUN+1];
950
    int last, run, level, start, end, i;
951

    
952
    /* compute max_level[], max_run[] and index_run[] */
953
    for(last=0;last<2;last++) {
954
        if (last == 0) {
955
            start = 0;
956
            end = rl->last;
957
        } else {
958
            start = rl->last;
959
            end = rl->n;
960
        }
961

    
962
        memset(max_level, 0, MAX_RUN + 1);
963
        memset(max_run, 0, MAX_LEVEL + 1);
964
        memset(index_run, rl->n, MAX_RUN + 1);
965
        for(i=start;i<end;i++) {
966
            run = rl->table_run[i];
967
            level = rl->table_level[i];
968
            if (index_run[run] == rl->n)
969
                index_run[run] = i;
970
            if (level > max_level[run])
971
                max_level[run] = level;
972
            if (run > max_run[level])
973
                max_run[level] = run;
974
        }
975
        rl->max_level[last] = av_malloc(MAX_RUN + 1);
976
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
977
        rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
978
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
979
        rl->index_run[last] = av_malloc(MAX_RUN + 1);
980
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
981
    }
982
}
983

    
984
/* draw the edges of width 'w' of an image of size width, height */
985
//FIXME check that this is ok for mpeg4 interlaced
986
static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
987
{
988
    uint8_t *ptr, *last_line;
989
    int i;
990

    
991
    last_line = buf + (height - 1) * wrap;
992
    for(i=0;i<w;i++) {
993
        /* top and bottom */
994
        memcpy(buf - (i + 1) * wrap, buf, width);
995
        memcpy(last_line + (i + 1) * wrap, last_line, width);
996
    }
997
    /* left and right */
998
    ptr = buf;
999
    for(i=0;i<height;i++) {
1000
        memset(ptr - w, ptr[0], w);
1001
        memset(ptr + width, ptr[width-1], w);
1002
        ptr += wrap;
1003
    }
1004
    /* corners */
1005
    for(i=0;i<w;i++) {
1006
        memset(buf - (i + 1) * wrap - w, buf[0], w); /* top left */
1007
        memset(buf - (i + 1) * wrap + width, buf[width-1], w); /* top right */
1008
        memset(last_line + (i + 1) * wrap - w, last_line[0], w); /* bottom left */
1009
        memset(last_line + (i + 1) * wrap + width, last_line[width-1], w); /* bottom right */
1010
    }
1011
}
1012

    
1013
static int find_unused_picture(MpegEncContext *s, int shared){
1014
    int i;
1015
    
1016
    if(shared){
1017
        for(i=0; i<MAX_PICTURE_COUNT; i++){
1018
            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
1019
        }
1020
    }else{
1021
        for(i=0; i<MAX_PICTURE_COUNT; i++){
1022
            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
1023
        }
1024
        for(i=0; i<MAX_PICTURE_COUNT; i++){
1025
            if(s->picture[i].data[0]==NULL) break;
1026
        }
1027
    }
1028

    
1029
    assert(i<MAX_PICTURE_COUNT);
1030
    return i;
1031
}
1032

    
1033
/* generic function for encode/decode called before a frame is coded/decoded */
1034
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1035
{
1036
    int i;
1037
    AVFrame *pic;
1038

    
1039
    s->mb_skiped = 0;
1040

    
1041
    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1042

    
1043
    /* mark&release old frames */
1044
    if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr->data[0]) {
1045
        avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr);
1046

    
1047
        /* release forgotten pictures */
1048
        /* if(mpeg124/h263) */
1049
        if(!s->encoding){
1050
            for(i=0; i<MAX_PICTURE_COUNT; i++){
1051
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
1052
                    fprintf(stderr, "releasing zombie picture\n");
1053
                    avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);                
1054
                }
1055
            }
1056
        }
1057
    }
1058
alloc:
1059
    if(!s->encoding){
1060
        /* release non-reference frames */
1061
        for(i=0; i<MAX_PICTURE_COUNT; i++){
1062
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1063
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1064
            }
1065
        }
1066

    
1067
        i= find_unused_picture(s, 0);
1068
    
1069
        pic= (AVFrame*)&s->picture[i];
1070
        pic->reference= s->pict_type != B_TYPE ? 3 : 0;
1071

    
1072
        if(s->current_picture_ptr)
1073
            pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
1074
        
1075
        if( alloc_picture(s, (Picture*)pic, 0) < 0)
1076
            return -1;
1077

    
1078
        s->current_picture_ptr= &s->picture[i];
1079
    }
1080

    
1081
    s->current_picture_ptr->pict_type= s->pict_type;
1082
//    if(s->flags && CODEC_FLAG_QSCALE) 
1083
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1084
    s->current_picture_ptr->key_frame= s->pict_type == I_TYPE;
1085

    
1086
    copy_picture(&s->current_picture, s->current_picture_ptr);
1087
  
1088
  if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
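    /* reference shuffling: for I/P frames the previous "next" picture becomes
       "last" and the newly allocated picture becomes "next"; B-frames predict
       from last/next and are not kept as references themselves */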
1089
    if (s->pict_type != B_TYPE) {
1090
        s->last_picture_ptr= s->next_picture_ptr;
1091
        s->next_picture_ptr= s->current_picture_ptr;
1092
    }
1093
    
1094
    if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr);
1095
    if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr);
1096
    
1097
    if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){
1098
        fprintf(stderr, "warning: first frame is no keyframe\n");
1099
        assert(s->pict_type != B_TYPE); //these should have been dropped if we don't have a reference
1100
        goto alloc;
1101
    }
1102

    
1103
    assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
1104

    
1105
    if(s->picture_structure!=PICT_FRAME){
1106
        int i;
1107
        for(i=0; i<4; i++){
1108
            if(s->picture_structure == PICT_BOTTOM_FIELD){
1109
                 s->current_picture.data[i] += s->current_picture.linesize[i];
1110
            } 
1111
            s->current_picture.linesize[i] *= 2;
1112
            s->last_picture.linesize[i] *=2;
1113
            s->next_picture.linesize[i] *=2;
1114
        }
1115
    }
1116
  }
1117
   
1118
    s->hurry_up= s->avctx->hurry_up;
1119
    s->error_resilience= avctx->error_resilience;
1120

    
1121
    /* set dequantizer, we can't do it during init as it might change for mpeg4
1122
       and we can't do it in the header decode as init isn't called for mpeg4 there yet */
1123
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO) 
1124
        s->dct_unquantize = s->dct_unquantize_mpeg2;
1125
    else if(s->out_format == FMT_H263)
1126
        s->dct_unquantize = s->dct_unquantize_h263;
1127
    else 
1128
        s->dct_unquantize = s->dct_unquantize_mpeg1;
1129

    
1130
#ifdef HAVE_XVMC
1131
    if(s->avctx->xvmc_acceleration)
1132
        return XVMC_field_start(s, avctx);
1133
#endif
1134
    return 0;
1135
}
1136

    
1137
/* generic function for encode/decode called after a frame has been coded/decoded */
1138
void MPV_frame_end(MpegEncContext *s)
1139
{
1140
    int i;
1141
    /* draw edge for correct motion prediction if outside */
1142
#ifdef HAVE_XVMC
1143
//just to make sure that all data is rendered.
1144
    if(s->avctx->xvmc_acceleration){
1145
        XVMC_field_end(s);
1146
    }else
1147
#endif
1148
    if(s->unrestricted_mv && s->pict_type != B_TYPE && !s->intra_only && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1149
            draw_edges(s->current_picture.data[0], s->linesize  , s->h_edge_pos   , s->v_edge_pos   , EDGE_WIDTH  );
1150
            draw_edges(s->current_picture.data[1], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1151
            draw_edges(s->current_picture.data[2], s->uvlinesize, s->h_edge_pos>>1, s->v_edge_pos>>1, EDGE_WIDTH/2);
1152
    }
1153
    emms_c();
1154
    
1155
    s->last_pict_type    = s->pict_type;
1156
    if(s->pict_type!=B_TYPE){
1157
        s->last_non_b_pict_type= s->pict_type;
1158
    }
1159
#if 0
1160
        /* copy back current_picture variables */
1161
    for(i=0; i<MAX_PICTURE_COUNT; i++){
1162
        if(s->picture[i].data[0] == s->current_picture.data[0]){
1163
            s->picture[i]= s->current_picture;
1164
            break;
1165
        }    
1166
    }
1167
    assert(i<MAX_PICTURE_COUNT);
1168
#endif    
1169

    
1170
    if(s->encoding){
1171
        /* release non-reference frames */
1172
        for(i=0; i<MAX_PICTURE_COUNT; i++){
1173
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1174
                s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]);
1175
            }
1176
        }
1177
    }
1178
    // clear copies, to avoid confusion
1179
#if 0
1180
    memset(&s->last_picture, 0, sizeof(Picture));
1181
    memset(&s->next_picture, 0, sizeof(Picture));
1182
    memset(&s->current_picture, 0, sizeof(Picture));
1183
#endif
1184
}
1185

    
1186
/**
1187
 * draws a line from (ex, ey) -> (sx, sy).
1188
 * @param w width of the image
1189
 * @param h height of the image
1190
 * @param stride stride/linesize of the image
1191
 * @param color color of the line
1192
 */
1193
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
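    /* clip the endpoints to the image, then step along the major axis with a
       16.16 fixed-point slope and round the minor coordinate (a simple DDA) */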
1194
    int t, x, y, f;
1195
    
1196
    sx= clip(sx, 0, w-1);
1197
    sy= clip(sy, 0, h-1);
1198
    ex= clip(ex, 0, w-1);
1199
    ey= clip(ey, 0, h-1);
1200
    
1201
    buf[sy*stride + sx]+= color;
1202
    
1203
    if(ABS(ex - sx) > ABS(ey - sy)){
1204
        if(sx > ex){
1205
            t=sx; sx=ex; ex=t;
1206
            t=sy; sy=ey; ey=t;
1207
        }
1208
        buf+= sx + sy*stride;
1209
        ex-= sx;
1210
        f= ((ey-sy)<<16)/ex;
1211
        for(x= 0; x <= ex; x++){
1212
            y= ((x*f) + (1<<15))>>16;
1213
            buf[y*stride + x]+= color;
1214
        }
1215
    }else{
1216
        if(sy > ey){
1217
            t=sx; sx=ex; ex=t;
1218
            t=sy; sy=ey; ey=t;
1219
        }
1220
        buf+= sx + sy*stride;
1221
        ey-= sy;
1222
        if(ey) f= ((ex-sx)<<16)/ey;
1223
        else   f= 0;
1224
        for(y= 0; y <= ey; y++){
1225
            x= ((y*f) + (1<<15))>>16;
1226
            buf[y*stride + x]+= color;
1227
        }
1228
    }
1229
}
1230

    
1231
/**
1232
 * draws an arrow from (ex, ey) -> (sx, sy).
1233
 * @param w width of the image
1234
 * @param h height of the image
1235
 * @param stride stride/linesize of the image
1236
 * @param color color of the arrow
1237
 */
1238
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){ 
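    /* the arrow head at (sx, sy) is drawn as two short strokes: the shaft
       direction rotated by roughly +/-45 degrees and cut to a fixed length */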
1239
    int dx,dy;
1240

    
1241
    sx= clip(sx, -100, w+100);
1242
    sy= clip(sy, -100, h+100);
1243
    ex= clip(ex, -100, w+100);
1244
    ey= clip(ey, -100, h+100);
1245
    
1246
    dx= ex - sx;
1247
    dy= ey - sy;
1248
    
1249
    if(dx*dx + dy*dy > 3*3){
1250
        int rx=  dx + dy;
1251
        int ry= -dx + dy;
1252
        int length= ff_sqrt((rx*rx + ry*ry)<<8);
1253
        
1254
        //FIXME subpixel accuracy
1255
        rx= ROUNDED_DIV(rx*3<<4, length);
1256
        ry= ROUNDED_DIV(ry*3<<4, length);
1257
        
1258
        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1259
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1260
    }
1261
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1262
}
1263

    
1264
/**
1265
 * prints debugging info for the given picture.
1266
 */
1267
void ff_print_debug_info(MpegEncContext *s, Picture *pict){
1268

    
1269
    if(!pict || !pict->mb_type) return;
1270

    
1271
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1272
        int x,y;
1273

    
1274
        for(y=0; y<s->mb_height; y++){
1275
            for(x=0; x<s->mb_width; x++){
1276
                if(s->avctx->debug&FF_DEBUG_SKIP){
1277
                    int count= s->mbskip_table[x + y*s->mb_stride];
1278
                    if(count>9) count=9;
1279
                    printf("%1d", count);
1280
                }
1281
                if(s->avctx->debug&FF_DEBUG_QP){
1282
                    printf("%2d", pict->qscale_table[x + y*s->mb_stride]);
1283
                }
1284
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1285
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
1286
                    
1287
                    //Type & MV direction
1288
                    if(IS_PCM(mb_type))
1289
                        printf("P");
1290
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1291
                        printf("A");
1292
                    else if(IS_INTRA4x4(mb_type))
1293
                        printf("i");
1294
                    else if(IS_INTRA16x16(mb_type))
1295
                        printf("I");
1296
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1297
                        printf("d");
1298
                    else if(IS_DIRECT(mb_type))
1299
                        printf("D");
1300
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1301
                        printf("g");
1302
                    else if(IS_GMC(mb_type))
1303
                        printf("G");
1304
                    else if(IS_SKIP(mb_type))
1305
                        printf("S");
1306
                    else if(!USES_LIST(mb_type, 1))
1307
                        printf(">");
1308
                    else if(!USES_LIST(mb_type, 0))
1309
                        printf("<");
1310
                    else{
1311
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1312
                        printf("X");
1313
                    }
1314
                    
1315
                    //segmentation
1316
                    if(IS_8X8(mb_type))
1317
                        printf("+");
1318
                    else if(IS_16X8(mb_type))
1319
                        printf("-");
1320
                    else if(IS_8X16(mb_type))
1321
                        printf("?");
1322
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1323
                        printf(" ");
1324
                    else
1325
                        printf("?");
1326
                    
1327
                        
1328
                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264)
1329
                        printf("=");
1330
                    else
1331
                        printf(" ");
1332
                }
1333
//                printf(" ");
1334
            }
1335
            printf("\n");
1336
        }
1337
    }
1338
    
1339
    if((s->avctx->debug&FF_DEBUG_VIS_MV) && s->motion_val){
1340
        const int shift= 1 + s->quarter_sample;
1341
        int mb_y;
1342
        uint8_t *ptr= pict->data[0];
1343
        s->low_delay=0; //needed to see the vectors without trashing the buffers
1344

    
1345
        for(mb_y=0; mb_y<s->mb_height; mb_y++){
1346
            int mb_x;
1347
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
1348
                const int mb_index= mb_x + mb_y*s->mb_stride;
1349
                if(IS_8X8(s->current_picture.mb_type[mb_index])){
1350
                    int i;
1351
                    for(i=0; i<4; i++){
1352
                        int sx= mb_x*16 + 4 + 8*(i&1);
1353
                        int sy= mb_y*16 + 4 + 8*(i>>1);
1354
                        int xy= 1 + mb_x*2 + (i&1) + (mb_y*2 + 1 + (i>>1))*(s->mb_width*2 + 2);
1355
                        int mx= (s->motion_val[xy][0]>>shift) + sx;
1356
                        int my= (s->motion_val[xy][1]>>shift) + sy;
1357
                        draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1358
                    }
1359
                }else{
1360
                    int sx= mb_x*16 + 8;
1361
                    int sy= mb_y*16 + 8;
1362
                    int xy= 1 + mb_x*2 + (mb_y*2 + 1)*(s->mb_width*2 + 2);
1363
                    int mx= (s->motion_val[xy][0]>>shift) + sx;
1364
                    int my= (s->motion_val[xy][1]>>shift) + sy;
1365
                    draw_arrow(ptr, sx, sy, mx, my, s->width, s->height, s->linesize, 100);
1366
                }
1367
                s->mbskip_table[mb_index]=0;
1368
            }
1369
        }
1370
    }
1371
}
1372

    
1373
#ifdef CONFIG_ENCODERS
1374

    
1375
static int get_sae(uint8_t *src, int ref, int stride){
1376
    int x,y;
1377
    int acc=0;
1378
    
1379
    for(y=0; y<16; y++){
1380
        for(x=0; x<16; x++){
1381
            acc+= ABS(src[x+y*stride] - ref);
1382
        }
1383
    }
1384
    
1385
    return acc;
1386
}
1387

    
1388
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride){
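    /* counts the 16x16 blocks for which an intra-style measure (SAE against the
       block mean) is clearly below the inter SAD against ref; the caller uses
       this as a rough scene-change / B-frame placement heuristic */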
1389
    int x, y, w, h;
1390
    int acc=0;
1391
    
1392
    w= s->width &~15;
1393
    h= s->height&~15;
1394
    
1395
    for(y=0; y<h; y+=16){
1396
        for(x=0; x<w; x+=16){
1397
            int offset= x + y*stride;
1398
            int sad = s->dsp.pix_abs16x16(src + offset, ref + offset, stride);
1399
            int mean= (s->dsp.pix_sum(src + offset, stride) + 128)>>8;
1400
            int sae = get_sae(src + offset, mean, stride);
1401
            
1402
            acc+= sae + 500 < sad;
1403
        }
1404
    }
1405
    return acc;
1406
}
1407

    
1408

    
1409
static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
1410
    AVFrame *pic=NULL;
1411
    int i;
1412
    const int encoding_delay= s->max_b_frames;
1413
    int direct=1;
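    /* direct=1: the user's buffers can be referenced in place (strides match and
       either there is no B-frame delay or CODEC_FLAG_INPUT_PRESERVED is set);
       otherwise the frame data is copied into an internal picture below */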
1414
    
1415
  if(pic_arg){
1416
    if(encoding_delay && !(s->flags&CODEC_FLAG_INPUT_PRESERVED)) direct=0;
1417
    if(pic_arg->linesize[0] != s->linesize) direct=0;
1418
    if(pic_arg->linesize[1] != s->uvlinesize) direct=0;
1419
    if(pic_arg->linesize[2] != s->uvlinesize) direct=0;
1420
  
1421
//    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
1422
    
1423
    if(direct){
1424
        i= find_unused_picture(s, 1);
1425

    
1426
        pic= (AVFrame*)&s->picture[i];
1427
        pic->reference= 3;
1428
    
1429
        for(i=0; i<4; i++){
1430
            pic->data[i]= pic_arg->data[i];
1431
            pic->linesize[i]= pic_arg->linesize[i];
1432
        }
1433
        alloc_picture(s, (Picture*)pic, 1);
1434
    }else{
1435
        int offset= 16;
1436
        i= find_unused_picture(s, 0);
1437

    
1438
        pic= (AVFrame*)&s->picture[i];
1439
        pic->reference= 3;
1440

    
1441
        alloc_picture(s, (Picture*)pic, 0);
1442

    
1443
        if(   pic->data[0] + offset == pic_arg->data[0] 
1444
           && pic->data[1] + offset == pic_arg->data[1]
1445
           && pic->data[2] + offset == pic_arg->data[2]){
1446
       // empty
1447
        }else{
1448
            int h_chroma_shift, v_chroma_shift;
1449
            avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1450
        
1451
            for(i=0; i<3; i++){
1452
                int src_stride= pic_arg->linesize[i];
1453
                int dst_stride= i ? s->uvlinesize : s->linesize;
1454
                int h_shift= i ? h_chroma_shift : 0;
1455
                int v_shift= i ? v_chroma_shift : 0;
1456
                int w= s->width >>h_shift;
1457
                int h= s->height>>v_shift;
1458
                uint8_t *src= pic_arg->data[i];
1459
                uint8_t *dst= pic->data[i] + offset;
1460
            
1461
                if(src_stride==dst_stride)
1462
                    memcpy(dst, src, src_stride*h);
1463
                else{
1464
                    while(h--){
1465
                        memcpy(dst, src, w);
1466
                        dst += dst_stride;
1467
                        src += src_stride;
1468
                    }
1469
                }
1470
            }
1471
        }
1472
    }
1473
    pic->quality= pic_arg->quality;
1474
    pic->pict_type= pic_arg->pict_type;
1475
    pic->pts = pic_arg->pts;
1476
    
1477
    if(s->input_picture[encoding_delay])
1478
        pic->display_picture_number= s->input_picture[encoding_delay]->display_picture_number + 1;
1479
    
1480
  }
1481

    
1482
    /* shift buffer entries */
1483
    for(i=1; i<MAX_PICTURE_COUNT /*s->encoding_delay+1*/; i++)
1484
        s->input_picture[i-1]= s->input_picture[i];
1485
        
1486
    s->input_picture[encoding_delay]= (Picture*)pic;
1487

    
1488
    return 0;
1489
}
1490

    
1491
static void select_input_picture(MpegEncContext *s){
1492
    int i;
1493
    int coded_pic_num=0;    
1494

    
1495
    if(s->reordered_input_picture[0])
1496
        coded_pic_num= s->reordered_input_picture[0]->coded_picture_number + 1;
1497

    
1498
    for(i=1; i<MAX_PICTURE_COUNT; i++)
1499
        s->reordered_input_picture[i-1]= s->reordered_input_picture[i];
1500
    s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL;
1501

    
1502
    /* set next picture types & ordering */
1503
    if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){
1504
        if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){
1505
            s->reordered_input_picture[0]= s->input_picture[0];
1506
            s->reordered_input_picture[0]->pict_type= I_TYPE;
1507
            s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1508
        }else{
1509
            int b_frames;
1510
            
1511
            if(s->flags&CODEC_FLAG_PASS2){
1512
                for(i=0; i<s->max_b_frames+1; i++){
1513
                    int pict_num= s->input_picture[0]->display_picture_number + i;
1514
                    int pict_type= s->rc_context.entry[pict_num].new_pict_type;
1515
                    s->input_picture[i]->pict_type= pict_type;
1516
                    
1517
                    if(i + 1 >= s->rc_context.num_entries) break;
1518
                }
1519
            }
1520

    
1521
            if(s->input_picture[0]->pict_type){
1522
                /* user selected pict_type */
1523
                for(b_frames=0; b_frames<s->max_b_frames+1; b_frames++){
1524
                    if(s->input_picture[b_frames]->pict_type!=B_TYPE) break;
1525
                }
1526
            
1527
                if(b_frames > s->max_b_frames){
1528
                    fprintf(stderr, "warning, too many bframes in a row\n");
1529
                    b_frames = s->max_b_frames;
1530
                }
1531
            }else if(s->b_frame_strategy==0){
1532
                b_frames= s->max_b_frames;
1533
                while(b_frames && !s->input_picture[b_frames]) b_frames--;
1534
            }else if(s->b_frame_strategy==1){
1535
                for(i=1; i<s->max_b_frames+1; i++){
1536
                    if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
1537
                        s->input_picture[i]->b_frame_score= 
1538
                            get_intra_count(s, s->input_picture[i  ]->data[0], 
1539
                                               s->input_picture[i-1]->data[0], s->linesize) + 1;
1540
                    }
1541
                }
1542
                for(i=0; i<s->max_b_frames; i++){
1543
                    if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break;
1544
                }
1545
                                
1546
                b_frames= FFMAX(0, i-1);
1547
                
1548
                /* reset scores */
1549
                for(i=0; i<b_frames+1; i++){
1550
                    s->input_picture[i]->b_frame_score=0;
1551
                }
1552
            }else{
1553
                fprintf(stderr, "illegal b frame strategy\n");
1554
                b_frames=0;
1555
            }
1556

    
1557
            emms_c();
1558
//static int b_count=0;
1559
//b_count+= b_frames;
1560
//printf("b_frames: %d\n", b_count);
1561
                        
1562
            s->reordered_input_picture[0]= s->input_picture[b_frames];
1563
            if(   s->picture_in_gop_number + b_frames >= s->gop_size 
1564
               || s->reordered_input_picture[0]->pict_type== I_TYPE)
1565
                s->reordered_input_picture[0]->pict_type= I_TYPE;
1566
            else
1567
                s->reordered_input_picture[0]->pict_type= P_TYPE;
1568
            s->reordered_input_picture[0]->coded_picture_number= coded_pic_num;
1569
            for(i=0; i<b_frames; i++){
1570
                coded_pic_num++;
1571
                s->reordered_input_picture[i+1]= s->input_picture[i];
1572
                s->reordered_input_picture[i+1]->pict_type= B_TYPE;
1573
                s->reordered_input_picture[i+1]->coded_picture_number= coded_pic_num;
1574
            }
1575
        }
1576
    }
1577
    
1578
    if(s->reordered_input_picture[0]){
1579
        s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0;
1580

    
1581
        copy_picture(&s->new_picture, s->reordered_input_picture[0]);
1582

    
1583
        if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
1584
            // input is a shared pix, so we can't modify it -> alloc a new one & ensure that the shared one is reusable
1585
        
1586
            int i= find_unused_picture(s, 0);
1587
            Picture *pic= &s->picture[i];
1588

    
1589
            /* mark us unused / free shared pic */
1590
            for(i=0; i<4; i++)
1591
                s->reordered_input_picture[0]->data[i]= NULL;
1592
            s->reordered_input_picture[0]->type= 0;
1593
            
1594
            //FIXME bad, copy * except
1595
            pic->pict_type = s->reordered_input_picture[0]->pict_type;
1596
            pic->quality   = s->reordered_input_picture[0]->quality;
1597
            pic->coded_picture_number = s->reordered_input_picture[0]->coded_picture_number;
1598
            pic->reference = s->reordered_input_picture[0]->reference;
1599
            pic->pts = s->reordered_input_picture[0]->pts;
1600
            
1601
            alloc_picture(s, pic, 0);
1602

    
1603
            s->current_picture_ptr= pic;
1604
        }else{
1605
            // input is not a shared pix -> reuse buffer for current_pix
1606

    
1607
            assert(   s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER 
1608
                   || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL);
1609
            
1610
            s->current_picture_ptr= s->reordered_input_picture[0];
1611
            for(i=0; i<4; i++){
1612
                s->new_picture.data[i]+=16;
1613
            }
1614
        }
1615
        copy_picture(&s->current_picture, s->current_picture_ptr);
1616
    
1617
        s->picture_number= s->new_picture.display_picture_number;
1618
//printf("dpn:%d\n", s->picture_number);
1619
    }else{
1620
       memset(&s->new_picture, 0, sizeof(Picture));
1621
    }
1622
}
1623

    
1624
int MPV_encode_picture(AVCodecContext *avctx,
1625
                       unsigned char *buf, int buf_size, void *data)
1626
{
1627
    MpegEncContext *s = avctx->priv_data;
1628
    AVFrame *pic_arg = data;
1629
    int i;
1630

    
1631
    if(avctx->pix_fmt != PIX_FMT_YUV420P){
1632
        fprintf(stderr, "this codec supports only YUV420P\n");
1633
        return -1;
1634
    }
1635
    
1636
    init_put_bits(&s->pb, buf, buf_size);
1637

    
1638
    s->picture_in_gop_number++;
1639

    
1640
    load_input_picture(s, pic_arg);
1641
    
1642
    select_input_picture(s);
1643
    
1644
    /* output? */
1645
    if(s->new_picture.data[0]){
1646

    
1647
        s->pict_type= s->new_picture.pict_type;
1648
//emms_c();
1649
//printf("qs:%f %f %d\n", s->new_picture.quality, s->current_picture.quality, s->qscale);
1650
        MPV_frame_start(s, avctx);
1651

    
1652
        encode_picture(s, s->picture_number);
1653
        
1654
        avctx->real_pict_num  = s->picture_number;
1655
        avctx->header_bits = s->header_bits;
1656
        avctx->mv_bits     = s->mv_bits;
1657
        avctx->misc_bits   = s->misc_bits;
1658
        avctx->i_tex_bits  = s->i_tex_bits;
1659
        avctx->p_tex_bits  = s->p_tex_bits;
1660
        avctx->i_count     = s->i_count;
1661
        avctx->p_count     = s->mb_num - s->i_count - s->skip_count; //FIXME f/b_count in avctx
1662
        avctx->skip_count  = s->skip_count;
1663

    
1664
        MPV_frame_end(s);
1665

    
1666
        if (s->out_format == FMT_MJPEG)
1667
            mjpeg_picture_trailer(s);
1668
        
1669
        if(s->flags&CODEC_FLAG_PASS1)
1670
            ff_write_pass1_stats(s);
1671

    
1672
        for(i=0; i<4; i++){
1673
            avctx->error[i] += s->current_picture_ptr->error[i];
1674
        }
1675
    }
1676

    
1677
    s->input_picture_number++;
1678

    
1679
    flush_put_bits(&s->pb);
1680
    s->frame_bits  = (pbBufPtr(&s->pb) - s->pb.buf) * 8;
1681
    
1682
    s->total_bits += s->frame_bits;
1683
    avctx->frame_bits  = s->frame_bits;
1684
    
1685
    return pbBufPtr(&s->pb) - s->pb.buf;
1686
}
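
/* Minimal calling sketch for the function above (illustrative only: the
   codec context is assumed to be fully opened, 'frame' to be a filled
   YUV420P AVFrame, and the buffer to be large enough for one coded frame): */
#if 0
static int encode_one_frame(AVCodecContext *avctx, AVFrame *frame, int64_t pts,
                            uint8_t *buf, int buf_size)
{
    frame->pts      = pts; /* copied into the internal picture by load_input_picture() */
    frame->pict_type= 0;   /* 0 -> let select_input_picture() choose I/P/B */

    /* returns the number of bytes written to buf; this may be 0 while the
       B-frame reordering delay is being filled */
    return MPV_encode_picture(avctx, buf, buf_size, frame);
}
#endif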
1687

    
1688
#endif //CONFIG_ENCODERS
1689

    
1690
static inline void gmc1_motion(MpegEncContext *s,
1691
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1692
                               int dest_offset,
1693
                               uint8_t **ref_picture, int src_offset)
1694
{
1695
    uint8_t *ptr;
1696
    int offset, src_x, src_y, linesize, uvlinesize;
1697
    int motion_x, motion_y;
1698
    int emu=0;
1699

    
1700
    motion_x= s->sprite_offset[0][0];
1701
    motion_y= s->sprite_offset[0][1];
1702
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
1703
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
1704
    motion_x<<=(3-s->sprite_warping_accuracy);
1705
    motion_y<<=(3-s->sprite_warping_accuracy);
1706
    src_x = clip(src_x, -16, s->width);
1707
    if (src_x == s->width)
1708
        motion_x =0;
1709
    src_y = clip(src_y, -16, s->height);
1710
    if (src_y == s->height)
1711
        motion_y =0;
1712

    
1713
    linesize = s->linesize;
1714
    uvlinesize = s->uvlinesize;
1715
    
1716
    ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
1717

    
1718
    dest_y+=dest_offset;
1719
    if(s->flags&CODEC_FLAG_EMU_EDGE){
1720
        if(   (unsigned)src_x >= s->h_edge_pos - 17
1721
           || (unsigned)src_y >= s->v_edge_pos - 17){
1722
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
1723
            ptr= s->edge_emu_buffer;
1724
        }
1725
    }
1726
    
1727
    if((motion_x|motion_y)&7){
1728
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1729
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
1730
    }else{
1731
        int dxy;
1732
        
1733
        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
1734
        if (s->no_rounding){
1735
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
1736
        }else{
1737
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
1738
        }
1739
    }
1740
    
1741
    if(s->flags&CODEC_FLAG_GRAY) return;
1742

    
1743
    motion_x= s->sprite_offset[1][0];
1744
    motion_y= s->sprite_offset[1][1];
1745
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
1746
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
1747
    motion_x<<=(3-s->sprite_warping_accuracy);
1748
    motion_y<<=(3-s->sprite_warping_accuracy);
1749
    src_x = clip(src_x, -8, s->width>>1);
1750
    if (src_x == s->width>>1)
1751
        motion_x =0;
1752
    src_y = clip(src_y, -8, s->height>>1);
1753
    if (src_y == s->height>>1)
1754
        motion_y =0;
1755

    
1756
    offset = (src_y * uvlinesize) + src_x + (src_offset>>1);
1757
    ptr = ref_picture[1] + offset;
1758
    if(s->flags&CODEC_FLAG_EMU_EDGE){
1759
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
1760
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
1761
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1762
            ptr= s->edge_emu_buffer;
1763
            emu=1;
1764
        }
1765
    }
1766
    s->dsp.gmc1(dest_cb + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1767
    
1768
    ptr = ref_picture[2] + offset;
1769
    if(emu){
1770
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
1771
        ptr= s->edge_emu_buffer;
1772
    }
1773
    s->dsp.gmc1(dest_cr + (dest_offset>>1), ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
1774
    
1775
    return;
1776
}
1777

    
1778
static inline void gmc_motion(MpegEncContext *s,
1779
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1780
                               int dest_offset,
1781
                               uint8_t **ref_picture, int src_offset)
1782
{
1783
    uint8_t *ptr;
1784
    int linesize, uvlinesize;
1785
    const int a= s->sprite_warping_accuracy;
1786
    int ox, oy;
1787

    
1788
    linesize = s->linesize;
1789
    uvlinesize = s->uvlinesize;
1790

    
1791
    ptr = ref_picture[0] + src_offset;
1792

    
1793
    dest_y+=dest_offset;
1794
    
1795
    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
1796
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;
1797

    
1798
    s->dsp.gmc(dest_y, ptr, linesize, 16,
1799
           ox, 
1800
           oy, 
1801
           s->sprite_delta[0][0], s->sprite_delta[0][1],
1802
           s->sprite_delta[1][0], s->sprite_delta[1][1], 
1803
           a+1, (1<<(2*a+1)) - s->no_rounding,
1804
           s->h_edge_pos, s->v_edge_pos);
1805
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
1806
           ox + s->sprite_delta[0][0]*8, 
1807
           oy + s->sprite_delta[1][0]*8, 
1808
           s->sprite_delta[0][0], s->sprite_delta[0][1],
1809
           s->sprite_delta[1][0], s->sprite_delta[1][1], 
1810
           a+1, (1<<(2*a+1)) - s->no_rounding,
1811
           s->h_edge_pos, s->v_edge_pos);
1812

    
1813
    if(s->flags&CODEC_FLAG_GRAY) return;
1814

    
1815

    
1816
    dest_cb+=dest_offset>>1;
1817
    dest_cr+=dest_offset>>1;
1818
    
1819
    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
1820
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;
1821

    
1822
    ptr = ref_picture[1] + (src_offset>>1);
1823
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
1824
           ox, 
1825
           oy, 
1826
           s->sprite_delta[0][0], s->sprite_delta[0][1],
1827
           s->sprite_delta[1][0], s->sprite_delta[1][1], 
1828
           a+1, (1<<(2*a+1)) - s->no_rounding,
1829
           s->h_edge_pos>>1, s->v_edge_pos>>1);
1830
    
1831
    ptr = ref_picture[2] + (src_offset>>1);
1832
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
1833
           ox, 
1834
           oy, 
1835
           s->sprite_delta[0][0], s->sprite_delta[0][1],
1836
           s->sprite_delta[1][0], s->sprite_delta[1][1], 
1837
           a+1, (1<<(2*a+1)) - s->no_rounding,
1838
           s->h_edge_pos>>1, s->v_edge_pos>>1);
1839
}
1840

    
1841
/**
1842
 * Copies a rectangular area of samples to a temporary buffer and replicates the border samples.
1843
 * @param buf destination buffer
1844
 * @param src source buffer
1845
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
1846
 * @param block_w width of block
1847
 * @param block_h height of block
1848
 * @param src_x x coordinate of the top left sample of the block in the source buffer
1849
 * @param src_y y coordinate of the top left sample of the block in the source buffer
1850
 * @param w width of the source buffer
1851
 * @param h height of the source buffer
1852
 */
1853
void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, 
1854
                                    int src_x, int src_y, int w, int h){
1855
    int x, y;
1856
    int start_y, start_x, end_y, end_x;
1857

    
1858
    if(src_y>= h){
1859
        src+= (h-1-src_y)*linesize;
1860
        src_y=h-1;
1861
    }else if(src_y<=-block_h){
1862
        src+= (1-block_h-src_y)*linesize;
1863
        src_y=1-block_h;
1864
    }
1865
    if(src_x>= w){
1866
        src+= (w-1-src_x);
1867
        src_x=w-1;
1868
    }else if(src_x<=-block_w){
1869
        src+= (1-block_w-src_x);
1870
        src_x=1-block_w;
1871
    }
1872

    
1873
    start_y= FFMAX(0, -src_y);
1874
    start_x= FFMAX(0, -src_x);
1875
    end_y= FFMIN(block_h, h-src_y);
1876
    end_x= FFMIN(block_w, w-src_x);
1877

    
1878
    // copy existing part
1879
    for(y=start_y; y<end_y; y++){
1880
        for(x=start_x; x<end_x; x++){
1881
            buf[x + y*linesize]= src[x + y*linesize];
1882
        }
1883
    }
1884

    
1885
    //top
1886
    for(y=0; y<start_y; y++){
1887
        for(x=start_x; x<end_x; x++){
1888
            buf[x + y*linesize]= buf[x + start_y*linesize];
1889
        }
1890
    }
1891

    
1892
    //bottom
1893
    for(y=end_y; y<block_h; y++){
1894
        for(x=start_x; x<end_x; x++){
1895
            buf[x + y*linesize]= buf[x + (end_y-1)*linesize];
1896
        }
1897
    }
1898
                                    
1899
    for(y=0; y<block_h; y++){
1900
       //left
1901
        for(x=0; x<start_x; x++){
1902
            buf[x + y*linesize]= buf[start_x + y*linesize];
1903
        }
1904
       
1905
       //right
1906
        for(x=end_x; x<block_w; x++){
1907
            buf[x + y*linesize]= buf[end_x - 1 + y*linesize];
1908
        }
1909
    }
1910
}
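
/* Typical use of ff_emulated_edge_mc() (a sketch mirroring the callers
   below): when a 17x17 halfpel luma source block may reach outside the
   padded picture, it is first copied into edge_emu_buffer with the border
   samples replicated, and the motion compensation then reads from that copy. */
#if 0
    if(   (unsigned)src_x > s->h_edge_pos - 17
       || (unsigned)src_y > s->v_edge_pos - 17){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 17, 17,
                            src_x, src_y, s->h_edge_pos, s->v_edge_pos);
        ptr= s->edge_emu_buffer; /* all further reads use the edge-replicated copy */
    }
#endif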
1911

    
1912

    
1913
/* apply one mpeg motion vector to the three components */
1914
static inline void mpeg_motion(MpegEncContext *s,
1915
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1916
                               int dest_offset,
1917
                               uint8_t **ref_picture, int src_offset,
1918
                               int field_based, op_pixels_func (*pix_op)[4],
1919
                               int motion_x, int motion_y, int h)
1920
{
1921
    uint8_t *ptr;
1922
    int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
1923
    int emu=0;
1924
#if 0    
1925
if(s->quarter_sample)
1926
{
1927
    motion_x>>=1;
1928
    motion_y>>=1;
1929
}
1930
#endif
1931
    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
1932
    src_x = s->mb_x * 16 + (motion_x >> 1);
1933
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 1);
1934
                
1935
    /* WARNING: do not forget half pels */
1936
    height = s->height >> field_based;
1937
    v_edge_pos = s->v_edge_pos >> field_based;
1938
    src_x = clip(src_x, -16, s->width);
1939
    if (src_x == s->width)
1940
        dxy &= ~1;
1941
    src_y = clip(src_y, -16, height);
1942
    if (src_y == height)
1943
        dxy &= ~2;
1944
    linesize   = s->current_picture.linesize[0] << field_based;
1945
    uvlinesize = s->current_picture.linesize[1] << field_based;
1946
    ptr = ref_picture[0] + (src_y * linesize) + (src_x) + src_offset;
1947
    dest_y += dest_offset;
1948

    
1949
    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
1950
        if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
1951
           || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
1952
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based,  //FIXME linesize? and uv below
1953
                             src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
1954
            ptr= s->edge_emu_buffer + src_offset;
1955
            emu=1;
1956
        }
1957
    }
1958
    pix_op[0][dxy](dest_y, ptr, linesize, h);
1959

    
1960
    if(s->flags&CODEC_FLAG_GRAY) return;
1961

    
1962
    if (s->out_format == FMT_H263) {
1963
        dxy = 0;
1964
        if ((motion_x & 3) != 0)
1965
            dxy |= 1;
1966
        if ((motion_y & 3) != 0)
1967
            dxy |= 2;
1968
        mx = motion_x >> 2;
1969
        my = motion_y >> 2;
1970
    } else {
1971
        mx = motion_x / 2;
1972
        my = motion_y / 2;
1973
        dxy = ((my & 1) << 1) | (mx & 1);
1974
        mx >>= 1;
1975
        my >>= 1;
1976
    }
1977
    
1978
    src_x = s->mb_x * 8 + mx;
1979
    src_y = s->mb_y * (8 >> field_based) + my;
1980
    src_x = clip(src_x, -8, s->width >> 1);
1981
    if (src_x == (s->width >> 1))
1982
        dxy &= ~1;
1983
    src_y = clip(src_y, -8, height >> 1);
1984
    if (src_y == (height >> 1))
1985
        dxy &= ~2;
1986
    offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
1987
    ptr = ref_picture[1] + offset;
1988
    if(emu){
1989
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1990
                         src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1991
        ptr= s->edge_emu_buffer + (src_offset >> 1);
1992
    }
1993
    pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
1994

    
1995
    ptr = ref_picture[2] + offset;
1996
    if(emu){
1997
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9+field_based, 
1998
                         src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
1999
        ptr= s->edge_emu_buffer + (src_offset >> 1);
2000
    }
2001
    pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr, uvlinesize, h >> 1);
2002
}
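
/* Worked example for the halfpel addressing above: for a frame-based luma
   vector (motion_x, motion_y) = (3, 5) in halfpel units,
   dxy = ((5&1)<<1) | (3&1) = 3 selects the pix_op variant that interpolates
   in both directions, and the integer source position becomes
   (mb_x*16 + 1, mb_y*16 + 2); the chroma part is then derived from the same
   vector at half resolution further down in the function. */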
2003

    
2004
static inline void qpel_motion(MpegEncContext *s,
2005
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2006
                               int dest_offset,
2007
                               uint8_t **ref_picture, int src_offset,
2008
                               int field_based, op_pixels_func (*pix_op)[4],
2009
                               qpel_mc_func (*qpix_op)[16],
2010
                               int motion_x, int motion_y, int h)
2011
{
2012
    uint8_t *ptr;
2013
    int dxy, offset, mx, my, src_x, src_y, height, v_edge_pos, linesize, uvlinesize;
2014
    int emu=0;
2015

    
2016
    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2017
    src_x = s->mb_x * 16 + (motion_x >> 2);
2018
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);
2019

    
2020
    height = s->height >> field_based;
2021
    v_edge_pos = s->v_edge_pos >> field_based;
2022
    src_x = clip(src_x, -16, s->width);
2023
    if (src_x == s->width)
2024
        dxy &= ~3;
2025
    src_y = clip(src_y, -16, height);
2026
    if (src_y == height)
2027
        dxy &= ~12;
2028
    linesize = s->linesize << field_based;
2029
    uvlinesize = s->uvlinesize << field_based;
2030
    ptr = ref_picture[0] + (src_y * linesize) + src_x + src_offset;
2031
    dest_y += dest_offset;
2032
//printf("%d %d %d\n", src_x, src_y, dxy);
2033
    
2034
    if(s->flags&CODEC_FLAG_EMU_EDGE){
2035
        if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16 
2036
           || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
2037
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr - src_offset, s->linesize, 17, 17+field_based, 
2038
                             src_x, src_y<<field_based, s->h_edge_pos, s->v_edge_pos);
2039
            ptr= s->edge_emu_buffer + src_offset;
2040
            emu=1;
2041
        }
2042
    }
2043
    if(!field_based)
2044
        qpix_op[0][dxy](dest_y, ptr, linesize);
2045
    else{
2046
        //damn interlaced mode
2047
        //FIXME boundary mirroring is not exactly correct here
2048
        qpix_op[1][dxy](dest_y  , ptr  , linesize);
2049
        qpix_op[1][dxy](dest_y+8, ptr+8, linesize);
2050
    }
2051

    
2052
    if(s->flags&CODEC_FLAG_GRAY) return;
2053

    
2054
    if(field_based){
2055
        mx= motion_x/2;
2056
        my= motion_y>>1;
2057
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
2058
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
2059
        mx= (motion_x>>1) + rtab[motion_x&7];
2060
        my= (motion_y>>1) + rtab[motion_y&7];
2061
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
2062
        mx= (motion_x>>1)|(motion_x&1);
2063
        my= (motion_y>>1)|(motion_y&1);
2064
    }else{
2065
        mx= motion_x/2;
2066
        my= motion_y/2;
2067
    }
2068
    mx= (mx>>1)|(mx&1);
2069
    my= (my>>1)|(my&1);
2070

    
2071
    dxy= (mx&1) | ((my&1)<<1);
2072
    mx>>=1;
2073
    my>>=1;
2074

    
2075
    src_x = s->mb_x * 8 + mx;
2076
    src_y = s->mb_y * (8 >> field_based) + my;
2077
    src_x = clip(src_x, -8, s->width >> 1);
2078
    if (src_x == (s->width >> 1))
2079
        dxy &= ~1;
2080
    src_y = clip(src_y, -8, height >> 1);
2081
    if (src_y == (height >> 1))
2082
        dxy &= ~2;
2083

    
2084
    offset = (src_y * uvlinesize) + src_x + (src_offset >> 1);
2085
    ptr = ref_picture[1] + offset;
2086
    if(emu){
2087
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2088
                         src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2089
        ptr= s->edge_emu_buffer + (src_offset >> 1);
2090
    }
2091
    pix_op[1][dxy](dest_cb + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2092
    
2093
    ptr = ref_picture[2] + offset;
2094
    if(emu){
2095
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr - (src_offset >> 1), s->uvlinesize, 9, 9 + field_based, 
2096
                         src_x, src_y<<field_based, s->h_edge_pos>>1, s->v_edge_pos>>1);
2097
        ptr= s->edge_emu_buffer + (src_offset >> 1);
2098
    }
2099
    pix_op[1][dxy](dest_cr + (dest_offset >> 1), ptr,  uvlinesize, h >> 1);
2100
}
2101

    
2102
inline int ff_h263_round_chroma(int x){
2103
    if (x >= 0)
2104
        return  (h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2105
    else {
2106
        x = -x;
2107
        return -(h263_chroma_roundtab[x & 0xf] + ((x >> 3) & ~1));
2108
    }
2109
}
2110

    
2111
/**
2112
 * motion compensation of a single macroblock
2113
 * @param s context
2114
 * @param dest_y luma destination pointer
2115
 * @param dest_cb chroma cb/u destination pointer
2116
 * @param dest_cr chroma cr/v destination pointer
2117
 * @param dir direction (0->forward, 1->backward)
2118
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
2119
 * @param pix_op halfpel motion compensation function (average or put normally)
2120
 * @param qpix_op qpel motion compensation function (average or put normally)
2121
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
2122
 */
2123
static inline void MPV_motion(MpegEncContext *s, 
2124
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
2125
                              int dir, uint8_t **ref_picture, 
2126
                              op_pixels_func (*pix_op)[4], qpel_mc_func (*qpix_op)[16])
2127
{
2128
    int dxy, offset, mx, my, src_x, src_y, motion_x, motion_y;
2129
    int mb_x, mb_y, i;
2130
    uint8_t *ptr, *dest;
2131
    int emu=0;
2132

    
2133
    mb_x = s->mb_x;
2134
    mb_y = s->mb_y;
2135

    
2136
    switch(s->mv_type) {
2137
    case MV_TYPE_16X16:
2138
#ifdef CONFIG_RISKY
2139
        if(s->mcsel){
2140
            if(s->real_sprite_warping_points==1){
2141
                gmc1_motion(s, dest_y, dest_cb, dest_cr, 0,
2142
                            ref_picture, 0);
2143
            }else{
2144
                gmc_motion(s, dest_y, dest_cb, dest_cr, 0,
2145
                            ref_picture, 0);
2146
            }
2147
        }else if(s->quarter_sample){
2148
            qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2149
                        ref_picture, 0,
2150
                        0, pix_op, qpix_op,
2151
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
2152
        }else if(s->mspel){
2153
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
2154
                        ref_picture, pix_op,
2155
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
2156
        }else
2157
#endif
2158
        {
2159
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2160
                        ref_picture, 0,
2161
                        0, pix_op,
2162
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
2163
        }           
2164
        break;
2165
    case MV_TYPE_8X8:
2166
        mx = 0;
2167
        my = 0;
2168
        if(s->quarter_sample){
2169
            for(i=0;i<4;i++) {
2170
                motion_x = s->mv[dir][i][0];
2171
                motion_y = s->mv[dir][i][1];
2172

    
2173
                dxy = ((motion_y & 3) << 2) | (motion_x & 3);
2174
                src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
2175
                src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;
2176
                    
2177
                /* WARNING: do not forget half pels */
2178
                src_x = clip(src_x, -16, s->width);
2179
                if (src_x == s->width)
2180
                    dxy &= ~3;
2181
                src_y = clip(src_y, -16, s->height);
2182
                if (src_y == s->height)
2183
                    dxy &= ~12;
2184
                    
2185
                ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2186
                if(s->flags&CODEC_FLAG_EMU_EDGE){
2187
                    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8 
2188
                       || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
2189
                        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2190
                        ptr= s->edge_emu_buffer;
2191
                    }
2192
                }
2193
                dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2194
                qpix_op[1][dxy](dest, ptr, s->linesize);
2195

    
2196
                mx += s->mv[dir][i][0]/2;
2197
                my += s->mv[dir][i][1]/2;
2198
            }
2199
        }else{
2200
            for(i=0;i<4;i++) {
2201
                motion_x = s->mv[dir][i][0];
2202
                motion_y = s->mv[dir][i][1];
2203

    
2204
                dxy = ((motion_y & 1) << 1) | (motion_x & 1);
2205
                src_x = mb_x * 16 + (motion_x >> 1) + (i & 1) * 8;
2206
                src_y = mb_y * 16 + (motion_y >> 1) + (i >>1) * 8;
2207
                    
2208
                /* WARNING: do not forget half pels */
2209
                src_x = clip(src_x, -16, s->width);
2210
                if (src_x == s->width)
2211
                    dxy &= ~1;
2212
                src_y = clip(src_y, -16, s->height);
2213
                if (src_y == s->height)
2214
                    dxy &= ~2;
2215
                    
2216
                ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
2217
                if(s->flags&CODEC_FLAG_EMU_EDGE){
2218
                    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 8
2219
                       || (unsigned)src_y > s->v_edge_pos - (motion_y&1) - 8){
2220
                        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->linesize, 9, 9, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
2221
                        ptr= s->edge_emu_buffer;
2222
                    }
2223
                }
2224
                dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
2225
                pix_op[1][dxy](dest, ptr, s->linesize, 8);
2226

    
2227
                mx += s->mv[dir][i][0];
2228
                my += s->mv[dir][i][1];
2229
            }
2230
        }
2231

    
2232
        if(s->flags&CODEC_FLAG_GRAY) break;
2233
        /* In case of 8X8, we construct a single chroma motion vector
2234
           with a special rounding */
2235
        mx= ff_h263_round_chroma(mx);
2236
        my= ff_h263_round_chroma(my);
2237
        dxy = ((my & 1) << 1) | (mx & 1);
2238
        mx >>= 1;
2239
        my >>= 1;
2240

    
2241
        src_x = mb_x * 8 + mx;
2242
        src_y = mb_y * 8 + my;
2243
        src_x = clip(src_x, -8, s->width/2);
2244
        if (src_x == s->width/2)
2245
            dxy &= ~1;
2246
        src_y = clip(src_y, -8, s->height/2);
2247
        if (src_y == s->height/2)
2248
            dxy &= ~2;
2249
        
2250
        offset = (src_y * (s->uvlinesize)) + src_x;
2251
        ptr = ref_picture[1] + offset;
2252
        if(s->flags&CODEC_FLAG_EMU_EDGE){
2253
                if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
2254
                   || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
2255
                    ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2256
                    ptr= s->edge_emu_buffer;
2257
                    emu=1;
2258
                }
2259
            }
2260
        pix_op[1][dxy](dest_cb, ptr, s->uvlinesize, 8);
2261

    
2262
        ptr = ref_picture[2] + offset;
2263
        if(emu){
2264
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
2265
            ptr= s->edge_emu_buffer;
2266
        }
2267
        pix_op[1][dxy](dest_cr, ptr, s->uvlinesize, 8);
2268
        break;
2269
    case MV_TYPE_FIELD:
2270
        if (s->picture_structure == PICT_FRAME) {
2271
            if(s->quarter_sample){
2272
                /* top field */
2273
                qpel_motion(s, dest_y, dest_cb, dest_cr, 0,
2274
                            ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2275
                            1, pix_op, qpix_op,
2276
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
2277
                /* bottom field */
2278
                qpel_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2279
                            ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2280
                            1, pix_op, qpix_op,
2281
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
2282
            }else{
2283
                /* top field */       
2284
                mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2285
                            ref_picture, s->field_select[dir][0] ? s->linesize : 0,
2286
                            1, pix_op,
2287
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
2288
                /* bottom field */
2289
                mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2290
                            ref_picture, s->field_select[dir][1] ? s->linesize : 0,
2291
                            1, pix_op,
2292
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
2293
            }
2294
        } else {
2295
            int offset;
2296
            if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2297
                offset= s->field_select[dir][0] ? s->linesize : 0;
2298
            }else{
2299
                ref_picture= s->current_picture.data;
2300
                offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2301
            } 
2302

    
2303
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2304
                        ref_picture, offset,
2305
                        0, pix_op,
2306
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
2307
        }
2308
        break;
2309
    case MV_TYPE_16X8:{
2310
        int offset;
2311
         uint8_t ** ref2picture;
2312

    
2313
            if(s->picture_structure == s->field_select[dir][0] + 1 || s->pict_type == B_TYPE || s->first_field){
2314
                ref2picture= ref_picture;
2315
                offset= s->field_select[dir][0] ? s->linesize : 0;
2316
            }else{
2317
                ref2picture= s->current_picture.data;
2318
                offset= s->field_select[dir][0] ? s->linesize : -s->linesize; 
2319
            } 
2320

    
2321
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2322
                        ref2picture, offset,
2323
                        0, pix_op,
2324
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
2325

    
2326

    
2327
            if(s->picture_structure == s->field_select[dir][1] + 1 || s->pict_type == B_TYPE || s->first_field){
2328
                ref2picture= ref_picture;
2329
                offset= s->field_select[dir][1] ? s->linesize : 0;
2330
            }else{
2331
                ref2picture= s->current_picture.data;
2332
                offset= s->field_select[dir][1] ? s->linesize : -s->linesize; 
2333
            } 
2334
            // I know it is ugly but this is the only way to fool emu_edge without rewriting mpeg_motion
2335
            mpeg_motion(s, dest_y+16*s->linesize, dest_cb+8*s->uvlinesize, dest_cr+8*s->uvlinesize,
2336
                        0,
2337
                        ref2picture, offset,
2338
                        0, pix_op,
2339
                        s->mv[dir][1][0], s->mv[dir][1][1]+16, 8);
2340
        }
2341
        
2342
        break;
2343
    case MV_TYPE_DMV:
2344
    {
2345
    op_pixels_func (*dmv_pix_op)[4];
2346
    int offset;
2347

    
2348
        dmv_pix_op = s->dsp.put_pixels_tab;
2349

    
2350
        if(s->picture_structure == PICT_FRAME){
2351
            //put top field from top field
2352
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2353
                        ref_picture, 0,
2354
                        1, dmv_pix_op,
2355
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
2356
            //put bottom field from bottom field
2357
            mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2358
                        ref_picture, s->linesize,
2359
                        1, dmv_pix_op,
2360
                        s->mv[dir][0][0], s->mv[dir][0][1], 8);
2361

    
2362
            dmv_pix_op = s->dsp.avg_pixels_tab; 
2363
        
2364
            //avg top field from bottom field
2365
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2366
                        ref_picture, s->linesize,
2367
                        1, dmv_pix_op,
2368
                        s->mv[dir][2][0], s->mv[dir][2][1], 8);
2369
            //avg bottom field from top field
2370
            mpeg_motion(s, dest_y, dest_cb, dest_cr, s->linesize,
2371
                        ref_picture, 0,
2372
                        1, dmv_pix_op,
2373
                        s->mv[dir][3][0], s->mv[dir][3][1], 8);
2374

    
2375
        }else{
2376
            offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2377
                         s->linesize : 0;
2378

    
2379
            //put field from the same parity
2380
            //same parity is never in the same frame
2381
            mpeg_motion(s, dest_y, dest_cb, dest_cr, 0,
2382
                        ref_picture,offset,
2383
                        0,dmv_pix_op,
2384
                        s->mv[dir][0][0],s->mv[dir][0][1],16);
2385

    
2386
            // after put we make avg of the same block
2387
            dmv_pix_op=s->dsp.avg_pixels_tab; 
2388

    
2389
            //opposite parity is always in the same frame if this is the second field
2390
            if(!s->first_field){
2391
                ref_picture = s->current_picture.data;    
2392
                //top field is one linesize from the frame beginning
2393
                offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2394
                        -s->linesize : s->linesize;
2395
            }else 
2396
                offset=(s->picture_structure == PICT_BOTTOM_FIELD)? 
2397
                        0 : s->linesize;
2398

    
2399
            //avg field from the opposite parity
2400
            mpeg_motion(s, dest_y, dest_cb, dest_cr,0,
2401
                        ref_picture, offset,
2402
                        0,dmv_pix_op,
2403
                        s->mv[dir][2][0],s->mv[dir][2][1],16);
2404
        }
2405
    }
2406
    break;
2407

    
2408
    }
2409
}
2410

    
2411

    
2412
/* put block[] to dest[] */
2413
static inline void put_dct(MpegEncContext *s, 
2414
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
2415
{
2416
    s->dct_unquantize(s, block, i, s->qscale);
2417
    s->dsp.idct_put (dest, line_size, block);
2418
}
2419

    
2420
/* add block[] to dest[] */
2421
static inline void add_dct(MpegEncContext *s, 
2422
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
2423
{
2424
    if (s->block_last_index[i] >= 0) {
2425
        s->dsp.idct_add (dest, line_size, block);
2426
    }
2427
}
2428

    
2429
static inline void add_dequant_dct(MpegEncContext *s, 
2430
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
2431
{
2432
    if (s->block_last_index[i] >= 0) {
2433
        s->dct_unquantize(s, block, i, s->qscale);
2434

    
2435
        s->dsp.idct_add (dest, line_size, block);
2436
    }
2437
}
2438

    
2439
/**
2440
 * cleans dc, ac, coded_block for the current non-intra MB
2441
 */
2442
void ff_clean_intra_table_entries(MpegEncContext *s)
2443
{
2444
    int wrap = s->block_wrap[0];
2445
    int xy = s->block_index[0];
2446
    
2447
    s->dc_val[0][xy           ] = 
2448
    s->dc_val[0][xy + 1       ] = 
2449
    s->dc_val[0][xy     + wrap] =
2450
    s->dc_val[0][xy + 1 + wrap] = 1024;
2451
    /* ac pred */
2452
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
2453
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2454
    if (s->msmpeg4_version>=3) {
2455
        s->coded_block[xy           ] =
2456
        s->coded_block[xy + 1       ] =
2457
        s->coded_block[xy     + wrap] =
2458
        s->coded_block[xy + 1 + wrap] = 0;
2459
    }
2460
    /* chroma */
2461
    wrap = s->block_wrap[4];
2462
    xy = s->mb_x + 1 + (s->mb_y + 1) * wrap;
2463
    s->dc_val[1][xy] =
2464
    s->dc_val[2][xy] = 1024;
2465
    /* ac pred */
2466
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2467
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2468
    
2469
    s->mbintra_table[s->mb_x + s->mb_y*s->mb_stride]= 0;
2470
}
2471

    
2472
/* generic function called after a macroblock has been parsed by the
2473
   decoder or after it has been encoded by the encoder.
2474

2475
   Important variables used:
2476
   s->mb_intra : true if intra macroblock
2477
   s->mv_dir   : motion vector direction
2478
   s->mv_type  : motion vector type
2479
   s->mv       : motion vector
2480
   s->interlaced_dct : true if interlaced dct used (mpeg2)
2481
 */
2482
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
2483
{
2484
    int mb_x, mb_y;
2485
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
2486
#ifdef HAVE_XVMC
2487
    if(s->avctx->xvmc_acceleration){
2488
        XVMC_decode_mb(s,block);
2489
        return;
2490
    }
2491
#endif
2492

    
2493
    mb_x = s->mb_x;
2494
    mb_y = s->mb_y;
2495

    
2496
    s->current_picture.qscale_table[mb_xy]= s->qscale;
2497

    
2498
    /* update DC predictors for P macroblocks */
2499
    if (!s->mb_intra) {
2500
        if (s->h263_pred || s->h263_aic) {
2501
            if(s->mbintra_table[mb_xy])
2502
                ff_clean_intra_table_entries(s);
2503
        } else {
2504
            s->last_dc[0] =
2505
            s->last_dc[1] =
2506
            s->last_dc[2] = 128 << s->intra_dc_precision;
2507
        }
2508
    }
2509
    else if (s->h263_pred || s->h263_aic)
2510
        s->mbintra_table[mb_xy]=1;
2511

    
2512
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==B_TYPE))) { //FIXME precalc
2513
        uint8_t *dest_y, *dest_cb, *dest_cr;
2514
        int dct_linesize, dct_offset;
2515
        op_pixels_func (*op_pix)[4];
2516
        qpel_mc_func (*op_qpix)[16];
2517
        const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2518
        const int uvlinesize= s->current_picture.linesize[1];
2519

    
2520
        /* avoid copy if macroblock skipped in last frame too */
2521
        /* skip only during decoding, as we might trash the buffers a bit during encoding */
2522
        if(!s->encoding){
2523
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2524
            const int age= s->current_picture.age;
2525

    
2526
            assert(age);
2527

    
2528
            if (s->mb_skiped) {
2529
                s->mb_skiped= 0;
2530
                assert(s->pict_type!=I_TYPE);
2531
 
2532
                (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2533
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
2534

    
2535
                /* if previous was skipped too, then nothing to do !  */
2536
                if (*mbskip_ptr >= age && s->current_picture.reference){
2537
                    return;
2538
                }
2539
            } else if(!s->current_picture.reference){
2540
                (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2541
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
2542
            } else{
2543
                *mbskip_ptr = 0; /* not skipped */
2544
            }
2545
        }
2546

    
2547
        if (s->interlaced_dct) {
2548
            dct_linesize = linesize * 2;
2549
            dct_offset = linesize;
2550
        } else {
2551
            dct_linesize = linesize;
2552
            dct_offset = linesize * 8;
2553
        }
2554
        
2555
        dest_y=  s->dest[0];
2556
        dest_cb= s->dest[1];
2557
        dest_cr= s->dest[2];
2558

    
2559
        if (!s->mb_intra) {
2560
            /* motion handling */
2561
            /* decoding or more than one mb_type (MC was already done otherwise) */
2562
            if(!s->encoding){
2563
                if ((!s->no_rounding) || s->pict_type==B_TYPE){                
2564
                    op_pix = s->dsp.put_pixels_tab;
2565
                    op_qpix= s->dsp.put_qpel_pixels_tab;
2566
                }else{
2567
                    op_pix = s->dsp.put_no_rnd_pixels_tab;
2568
                    op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2569
                }
2570

    
2571
                if (s->mv_dir & MV_DIR_FORWARD) {
2572
                    MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2573
                    op_pix = s->dsp.avg_pixels_tab;
2574
                    op_qpix= s->dsp.avg_qpel_pixels_tab;
2575
                }
2576
                if (s->mv_dir & MV_DIR_BACKWARD) {
2577
                    MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2578
                }
2579
            }
2580

    
2581
            /* skip dequant / idct if we are really late ;) */
2582
            if(s->hurry_up>1) return;
2583

    
2584
            /* add dct residue */
2585
            if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2586
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2587
                add_dequant_dct(s, block[0], 0, dest_y, dct_linesize);
2588
                add_dequant_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2589
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2590
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2591

    
2592
                if(!(s->flags&CODEC_FLAG_GRAY)){
2593
                    add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize);
2594
                    add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize);
2595
                }
2596
            } else if(s->codec_id != CODEC_ID_WMV2){
2597
                add_dct(s, block[0], 0, dest_y, dct_linesize);
2598
                add_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2599
                add_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2600
                add_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2601

    
2602
                if(!(s->flags&CODEC_FLAG_GRAY)){
2603
                    add_dct(s, block[4], 4, dest_cb, uvlinesize);
2604
                    add_dct(s, block[5], 5, dest_cr, uvlinesize);
2605
                }
2606
            } 
2607
#ifdef CONFIG_RISKY
2608
            else{
2609
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2610
            }
2611
#endif
2612
        } else {
2613
            /* dct only in intra block */
2614
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2615
                put_dct(s, block[0], 0, dest_y, dct_linesize);
2616
                put_dct(s, block[1], 1, dest_y + 8, dct_linesize);
2617
                put_dct(s, block[2], 2, dest_y + dct_offset, dct_linesize);
2618
                put_dct(s, block[3], 3, dest_y + dct_offset + 8, dct_linesize);
2619

    
2620
                if(!(s->flags&CODEC_FLAG_GRAY)){
2621
                    put_dct(s, block[4], 4, dest_cb, uvlinesize);
2622
                    put_dct(s, block[5], 5, dest_cr, uvlinesize);
2623
                }
2624
            }else{
2625
                s->dsp.idct_put(dest_y                 , dct_linesize, block[0]);
2626
                s->dsp.idct_put(dest_y              + 8, dct_linesize, block[1]);
2627
                s->dsp.idct_put(dest_y + dct_offset    , dct_linesize, block[2]);
2628
                s->dsp.idct_put(dest_y + dct_offset + 8, dct_linesize, block[3]);
2629

    
2630
                if(!(s->flags&CODEC_FLAG_GRAY)){
2631
                    s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2632
                    s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2633
                }
2634
            }
2635
        }
2636
    }
2637
}
2638

    
2639
#ifdef CONFIG_ENCODERS
2640

    
2641
static inline void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
2642
{
2643
    static const char tab[64]=
2644
        {3,2,2,1,1,1,1,1,
2645
         1,1,1,1,1,1,1,1,
2646
         1,1,1,1,1,1,1,1,
2647
         0,0,0,0,0,0,0,0,
2648
         0,0,0,0,0,0,0,0,
2649
         0,0,0,0,0,0,0,0,
2650
         0,0,0,0,0,0,0,0,
2651
         0,0,0,0,0,0,0,0};
2652
    int score=0;
2653
    int run=0;
2654
    int i;
2655
    DCTELEM *block= s->block[n];
2656
    const int last_index= s->block_last_index[n];
2657
    int skip_dc;
2658

    
2659
    if(threshold<0){
2660
        skip_dc=0;
2661
        threshold= -threshold;
2662
    }else
2663
        skip_dc=1;
2664

    
2665
    /* are all the coefficients which we could set to zero already zero? */
2666
    if(last_index<=skip_dc - 1) return;
2667

    
2668
    for(i=0; i<=last_index; i++){
2669
        const int j = s->intra_scantable.permutated[i];
2670
        const int level = ABS(block[j]);
2671
        if(level==1){
2672
            if(skip_dc && i==0) continue;
2673
            score+= tab[run];
2674
            run=0;
2675
        }else if(level>1){
2676
            return;
2677
        }else{
2678
            run++;
2679
        }
2680
    }
2681
    if(score >= threshold) return;
2682
    for(i=skip_dc; i<=last_index; i++){
2683
        const int j = s->intra_scantable.permutated[i];
2684
        block[j]=0;
2685
    }
2686
    if(block[0]) s->block_last_index[n]= 0;
2687
    else         s->block_last_index[n]= -1;
2688
}
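
/* Summary of the heuristic above: a block that only contains a few isolated
   +-1 coefficients is often cheaper to drop than to code.  Every +-1 adds
   tab[run] to a score (coefficients preceded by short zero runs weigh more);
   if the total stays below the threshold, all of them are zeroed.  A negative
   threshold additionally allows the DC coefficient to be eliminated
   (skip_dc=0); otherwise the DC is always kept. */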
2689

    
2690
static inline void clip_coeffs(MpegEncContext *s, DCTELEM *block, int last_index)
2691
{
2692
    int i;
2693
    const int maxlevel= s->max_qcoeff;
2694
    const int minlevel= s->min_qcoeff;
2695
    
2696
    if(s->mb_intra){
2697
        i=1; //skip clipping of intra dc
2698
    }else
2699
        i=0;
2700
    
2701
    for(;i<=last_index; i++){
2702
        const int j= s->intra_scantable.permutated[i];
2703
        int level = block[j];
2704
       
2705
        if     (level>maxlevel) level=maxlevel;
2706
        else if(level<minlevel) level=minlevel;
2707

    
2708
        block[j]= level;
2709
    }
2710
}
2711

    
2712
#if 0
2713
static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2714
    int score=0;
2715
    int x,y;
2716
    
2717
    for(y=0; y<7; y++){
2718
        for(x=0; x<16; x+=4){
2719
            score+= ABS(s[x  ] - s[x  +stride]) + ABS(s[x+1] - s[x+1+stride]) 
2720
                   +ABS(s[x+2] - s[x+2+stride]) + ABS(s[x+3] - s[x+3+stride]);
2721
        }
2722
        s+= stride;
2723
    }
2724
    
2725
    return score;
2726
}
2727

2728
static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2729
    int score=0;
2730
    int x,y;
2731
    
2732
    for(y=0; y<7; y++){
2733
        for(x=0; x<16; x++){
2734
            score+= ABS(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2735
        }
2736
        s1+= stride;
2737
        s2+= stride;
2738
    }
2739
    
2740
    return score;
2741
}
2742
#else
2743
#define SQ(a) ((a)*(a))
2744

    
2745
static int pix_vcmp16x8(uint8_t *s, int stride){ //FIXME move to dsputil & optimize
2746
    int score=0;
2747
    int x,y;
2748
    
2749
    for(y=0; y<7; y++){
2750
        for(x=0; x<16; x+=4){
2751
            score+= SQ(s[x  ] - s[x  +stride]) + SQ(s[x+1] - s[x+1+stride]) 
2752
                   +SQ(s[x+2] - s[x+2+stride]) + SQ(s[x+3] - s[x+3+stride]);
2753
        }
2754
        s+= stride;
2755
    }
2756
    
2757
    return score;
2758
}
2759

    
2760
static int pix_diff_vcmp16x8(uint8_t *s1, uint8_t*s2, int stride){ //FIXME move to dsputil & optimize
2761
    int score=0;
2762
    int x,y;
2763
    
2764
    for(y=0; y<7; y++){
2765
        for(x=0; x<16; x++){
2766
            score+= SQ(s1[x  ] - s2[x ] - s1[x  +stride] + s2[x +stride]);
2767
        }
2768
        s1+= stride;
2769
        s2+= stride;
2770
    }
2771
    
2772
    return score;
2773
}
2774

    
2775
#endif
2776

    
2777
#endif //CONFIG_ENCODERS
2778

    
2779
/**
2780
 *
2781
 * @param h the normal height; this will be reduced automatically if needed for the last row
2782
 */
2783
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2784
    if (s->avctx->draw_horiz_band) {
2785
        AVFrame *src;
2786
        int offset[4];
2787
        
2788
        if(s->picture_structure != PICT_FRAME){
2789
            h <<= 1;
2790
            y <<= 1;
2791
            if(s->first_field  && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2792
        }
2793

    
2794
        h= FFMIN(h, s->height - y);
2795

    
2796
        if(s->pict_type==B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER)) 
2797
            src= (AVFrame*)s->current_picture_ptr;
2798
        else if(s->last_picture_ptr)
2799
            src= (AVFrame*)s->last_picture_ptr;
2800
        else
2801
            return;
2802
            
2803
        if(s->pict_type==B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2804
            offset[0]=
2805
            offset[1]=
2806
            offset[2]=
2807
            offset[3]= 0;
2808
        }else{
2809
            offset[0]= y * s->linesize;
2810
            offset[1]= 
2811
            offset[2]= (y>>1) * s->uvlinesize;
2812
            offset[3]= 0;
2813
        }
2814

    
2815
        emms_c();
2816

    
2817
        s->avctx->draw_horiz_band(s->avctx, src, offset,
2818
                                  y, s->picture_structure, h);
2819
    }
2820
}
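
/* Sketch of a matching user callback (names are illustrative; the prototype
   is the one implied by the call above): the band starts at
   src->data[i] + offset[i] and is h lines tall, so a player can display or
   copy it before the rest of the frame has been decoded. */
#if 0
static void my_draw_horiz_band(AVCodecContext *avctx, AVFrame *src,
                               int offset[4], int y, int type, int h)
{
    /* 'type' is the picture_structure passed by ff_draw_horiz_band(); a
       player would e.g. copy the luma band starting at src->data[0] + offset[0] */
}
#endif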
2821

    
2822
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2823
    const int linesize= s->current_picture.linesize[0]; //not s->linesize as this woulnd be wrong for field pics
2824
    const int uvlinesize= s->current_picture.linesize[1];
2825
        
2826
    s->block_index[0]= s->block_wrap[0]*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
2827
    s->block_index[1]= s->block_wrap[0]*(s->mb_y*2 + 1)     + s->mb_x*2;
2828
    s->block_index[2]= s->block_wrap[0]*(s->mb_y*2 + 2) - 1 + s->mb_x*2;
2829
    s->block_index[3]= s->block_wrap[0]*(s->mb_y*2 + 2)     + s->mb_x*2;
2830
    s->block_index[4]= s->block_wrap[4]*(s->mb_y + 1)                    + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2831
    s->block_index[5]= s->block_wrap[4]*(s->mb_y + 1 + s->mb_height + 2) + s->block_wrap[0]*(s->mb_height*2 + 2) + s->mb_x;
2832
    
2833
    if(s->pict_type==B_TYPE && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME){
2834
        s->dest[0] = s->current_picture.data[0] + s->mb_x * 16 - 16;
2835
        s->dest[1] = s->current_picture.data[1] + s->mb_x * 8 - 8;
2836
        s->dest[2] = s->current_picture.data[2] + s->mb_x * 8 - 8;
2837
    }else{
2838
        s->dest[0] = s->current_picture.data[0] + (s->mb_y * 16* linesize  ) + s->mb_x * 16 - 16;
2839
        s->dest[1] = s->current_picture.data[1] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2840
        s->dest[2] = s->current_picture.data[2] + (s->mb_y * 8 * uvlinesize) + s->mb_x * 8 - 8;
2841
    }    
2842
}
2843

    
2844
#ifdef CONFIG_ENCODERS
2845

    
2846
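/* encode one macroblock: grab the source pixels (intra) or the motion compensated
 * difference (inter), optionally switch to interlaced DCT, transform and quantize the
 * six blocks, drop nearly empty blocks, and entropy code with the codec specific routine */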
static void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2847
{
2848
    const int mb_x= s->mb_x;
2849
    const int mb_y= s->mb_y;
2850
    int i;
2851
    int skip_dct[6];
2852
    int dct_offset   = s->linesize*8; //default for progressive frames
2853
    
2854
    for(i=0; i<6; i++) skip_dct[i]=0;
2855
    
2856
    if(s->adaptive_quant){
2857
        const int last_qp= s->qscale;
2858
        const int mb_xy= mb_x + mb_y*s->mb_stride;
2859

    
2860
        s->lambda= s->lambda_table[mb_xy];
2861
        update_qscale(s);
2862
        s->dquant= s->qscale - last_qp;
2863

    
2864
        if(s->out_format==FMT_H263)
2865
            s->dquant= clip(s->dquant, -2, 2); //FIXME RD
2866
            
2867
        if(s->codec_id==CODEC_ID_MPEG4){        
2868
            if(!s->mb_intra){
2869
                if((s->mv_dir&MV_DIRECT) || s->mv_type==MV_TYPE_8X8)
2870
                    s->dquant=0;
2871
            }
2872
        }
2873
        s->qscale= last_qp + s->dquant;
2874
        s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
2875
        s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
2876
    }
2877

    
2878
    if (s->mb_intra) {
2879
        uint8_t *ptr;
2880
        int wrap_y;
2881
        int emu=0;
2882

    
2883
        wrap_y = s->linesize;
2884
        ptr = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2885

    
2886
        if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2887
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2888
            ptr= s->edge_emu_buffer;
2889
            emu=1;
2890
        }
2891
        
2892
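        /* frame vs. field DCT decision: compare the vertical activity of adjacent
         * lines (progressive) with that of lines two apart (interlaced) */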
        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2893
            int progressive_score, interlaced_score;
2894
            
2895
            progressive_score= pix_vcmp16x8(ptr, wrap_y  ) + pix_vcmp16x8(ptr + wrap_y*8, wrap_y );
2896
            interlaced_score = pix_vcmp16x8(ptr, wrap_y*2) + pix_vcmp16x8(ptr + wrap_y  , wrap_y*2);
2897
            
2898
            if(progressive_score > interlaced_score + 100){
2899
                s->interlaced_dct=1;
2900
            
2901
                dct_offset= wrap_y;
2902
                wrap_y<<=1;
2903
            }else
2904
                s->interlaced_dct=0;
2905
        }
2906
        
2907
        s->dsp.get_pixels(s->block[0], ptr                 , wrap_y);
2908
        s->dsp.get_pixels(s->block[1], ptr              + 8, wrap_y);
2909
        s->dsp.get_pixels(s->block[2], ptr + dct_offset    , wrap_y);
2910
        s->dsp.get_pixels(s->block[3], ptr + dct_offset + 8, wrap_y);
2911

    
2912
        if(s->flags&CODEC_FLAG_GRAY){
2913
            skip_dct[4]= 1;
2914
            skip_dct[5]= 1;
2915
        }else{
2916
            int wrap_c = s->uvlinesize;
2917
            ptr = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2918
            if(emu){
2919
                ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2920
                ptr= s->edge_emu_buffer;
2921
            }
2922
            s->dsp.get_pixels(s->block[4], ptr, wrap_c);
2923

    
2924
            ptr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2925
            if(emu){
2926
                ff_emulated_edge_mc(s->edge_emu_buffer, ptr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2927
                ptr= s->edge_emu_buffer;
2928
            }
2929
            s->dsp.get_pixels(s->block[5], ptr, wrap_c);
2930
        }
2931
    }else{
2932
        op_pixels_func (*op_pix)[4];
2933
        qpel_mc_func (*op_qpix)[16];
2934
        uint8_t *dest_y, *dest_cb, *dest_cr;
2935
        uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2936
        int wrap_y, wrap_c;
2937
        int emu=0;
2938

    
2939
        dest_y  = s->dest[0];
2940
        dest_cb = s->dest[1];
2941
        dest_cr = s->dest[2];
2942
        wrap_y = s->linesize;
2943
        wrap_c = s->uvlinesize;
2944
        ptr_y  = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16;
2945
        ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8;
2946
        ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8;
2947

    
2948
        if ((!s->no_rounding) || s->pict_type==B_TYPE){
2949
            op_pix = s->dsp.put_pixels_tab;
2950
            op_qpix= s->dsp.put_qpel_pixels_tab;
2951
        }else{
2952
            op_pix = s->dsp.put_no_rnd_pixels_tab;
2953
            op_qpix= s->dsp.put_no_rnd_qpel_pixels_tab;
2954
        }
2955

    
2956
        if (s->mv_dir & MV_DIR_FORWARD) {
2957
            MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
2958
            op_pix = s->dsp.avg_pixels_tab;
2959
            op_qpix= s->dsp.avg_qpel_pixels_tab;
2960
        }
2961
        if (s->mv_dir & MV_DIR_BACKWARD) {
2962
            MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
2963
        }
2964

    
2965
        if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){
2966
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, wrap_y, 16, 16, mb_x*16, mb_y*16, s->width, s->height);
2967
            ptr_y= s->edge_emu_buffer;
2968
            emu=1;
2969
        }
2970
        
2971
        if(s->flags&CODEC_FLAG_INTERLACED_DCT){
2972
            int progressive_score, interlaced_score;
2973
            
2974
            progressive_score= pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y  ) 
2975
                             + pix_diff_vcmp16x8(ptr_y + wrap_y*8, dest_y + wrap_y*8, wrap_y  );
2976
            interlaced_score = pix_diff_vcmp16x8(ptr_y           , dest_y           , wrap_y*2)
2977
                             + pix_diff_vcmp16x8(ptr_y + wrap_y  , dest_y + wrap_y  , wrap_y*2);
2978
            
2979
            if(progressive_score > interlaced_score + 600){
2980
                s->interlaced_dct=1;
2981
            
2982
                dct_offset= wrap_y;
2983
                wrap_y<<=1;
2984
            }else
2985
                s->interlaced_dct=0;
2986
        }
2987
        
2988
        s->dsp.diff_pixels(s->block[0], ptr_y                 , dest_y                 , wrap_y);
2989
        s->dsp.diff_pixels(s->block[1], ptr_y              + 8, dest_y              + 8, wrap_y);
2990
        s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset    , dest_y + dct_offset    , wrap_y);
2991
        s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y);
2992
        
2993
        if(s->flags&CODEC_FLAG_GRAY){
2994
            skip_dct[4]= 1;
2995
            skip_dct[5]= 1;
2996
        }else{
2997
            if(emu){
2998
                ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
2999
                ptr_cb= s->edge_emu_buffer;
3000
            }
3001
            s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
3002
            if(emu){
3003
                ff_emulated_edge_mc(s->edge_emu_buffer, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1);
3004
                ptr_cr= s->edge_emu_buffer;
3005
            }
3006
            s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
3007
        }
3008
        /* pre-quantization: skip blocks whose motion compensated error is already small */
3009
        if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x]<2*s->qscale*s->qscale){
3010
            //FIXME optimize
3011
            if(s->dsp.pix_abs8x8(ptr_y               , dest_y               , wrap_y) < 20*s->qscale) skip_dct[0]= 1;
3012
            if(s->dsp.pix_abs8x8(ptr_y            + 8, dest_y            + 8, wrap_y) < 20*s->qscale) skip_dct[1]= 1;
3013
            if(s->dsp.pix_abs8x8(ptr_y +dct_offset   , dest_y +dct_offset   , wrap_y) < 20*s->qscale) skip_dct[2]= 1;
3014
            if(s->dsp.pix_abs8x8(ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y) < 20*s->qscale) skip_dct[3]= 1;
3015
            if(s->dsp.pix_abs8x8(ptr_cb              , dest_cb              , wrap_c) < 20*s->qscale) skip_dct[4]= 1;
3016
            if(s->dsp.pix_abs8x8(ptr_cr              , dest_cr              , wrap_c) < 20*s->qscale) skip_dct[5]= 1;
3017
#if 0
3018
{
3019
 static int stat[7];
3020
 int num=0;
3021
 for(i=0; i<6; i++)
3022
  if(skip_dct[i]) num++;
3023
 stat[num]++;
3024
 
3025
 if(s->mb_x==0 && s->mb_y==0){
3026
  for(i=0; i<7; i++){
3027
   printf("%6d %1d\n", stat[i], i);
3028
  }
3029
 }
3030
}
3031
#endif
3032
        }
3033

    
3034
    }
3035
            
3036
    /* DCT & quantize */
3037
    if(s->out_format==FMT_MJPEG){
3038
        for(i=0;i<6;i++) {
3039
            int overflow;
3040
            s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, 8, &overflow);
3041
            if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3042
        }
3043
    }else{
3044
        for(i=0;i<6;i++) {
3045
            if(!skip_dct[i]){
3046
                int overflow;
3047
                s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
3048
            // FIXME we could decide to change the quantizer instead of clipping
3049
            // JS: I don't think that would be a good idea, it could lower quality instead
3050
            //     of improving it. Just INTRADC clipping deserves changes in the quantizer
3051
                if (overflow) clip_coeffs(s, s->block[i], s->block_last_index[i]);
3052
            }else
3053
                s->block_last_index[i]= -1;
3054
        }
3055
        
3056
        if(s->luma_elim_threshold && !s->mb_intra)
3057
            for(i=0; i<4; i++)
3058
                dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
3059
        if(s->chroma_elim_threshold && !s->mb_intra)
3060
            for(i=4; i<6; i++)
3061
                dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
3062

    
3063
        if(s->flags & CODEC_FLAG_CBP_RD){
3064
            for(i=0;i<6;i++) {
3065
                if(s->block_last_index[i] == -1)
3066
                    s->coded_score[i]= INT_MAX/256;
3067
            }
3068
        }
3069
    }
3070

    
3071
    if((s->flags&CODEC_FLAG_GRAY) && s->mb_intra){
3072
        s->block_last_index[4]=
3073
        s->block_last_index[5]= 0;
3074
        s->block[4][0]=
3075
        s->block[5][0]= (1024 + s->c_dc_scale/2)/ s->c_dc_scale;
3076
    }
3077

    
3078
    /* huffman encode */
3079
    switch(s->codec_id){ //FIXME funct ptr could be slightly faster
3080
    case CODEC_ID_MPEG1VIDEO:
3081
    case CODEC_ID_MPEG2VIDEO:
3082
        mpeg1_encode_mb(s, s->block, motion_x, motion_y); break;
3083
#ifdef CONFIG_RISKY
3084
    case CODEC_ID_MPEG4:
3085
        mpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3086
    case CODEC_ID_MSMPEG4V2:
3087
    case CODEC_ID_MSMPEG4V3:
3088
    case CODEC_ID_WMV1:
3089
        msmpeg4_encode_mb(s, s->block, motion_x, motion_y); break;
3090
    case CODEC_ID_WMV2:
3091
         ff_wmv2_encode_mb(s, s->block, motion_x, motion_y); break;
3092
    case CODEC_ID_H263:
3093
    case CODEC_ID_H263P:
3094
    case CODEC_ID_FLV1:
3095
    case CODEC_ID_RV10:
3096
        h263_encode_mb(s, s->block, motion_x, motion_y); break;
3097
#endif
3098
    case CODEC_ID_MJPEG:
3099
        mjpeg_encode_mb(s, s->block); break;
3100
    default:
3101
        assert(0);
3102
    }
3103
}
3104

    
3105
#endif //CONFIG_ENCODERS
3106

    
3107
/**
3108
 * combines the (truncated) bitstream into a complete frame
3109
 * @returns -1 if no complete frame could be created
3110
 */
3111
int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size){
3112
    ParseContext *pc= &s->parse_context;
3113

    
3114
#if 0
3115
    if(pc->overread){
3116
        printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3117
        printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3118
    }
3119
#endif
3120

    
3121
    /* copy overread bytes from the last frame into the buffer */
3122
    for(; pc->overread>0; pc->overread--){
3123
        pc->buffer[pc->index++]= pc->buffer[pc->overread_index++];
3124
    }
3125
    
3126
    pc->last_index= pc->index;
3127

    
3128
    /* copy into the buffer and return */
3129
    if(next == END_NOT_FOUND){
3130
        pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, (*buf_size) + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3131

    
3132
        memcpy(&pc->buffer[pc->index], *buf, *buf_size);
3133
        pc->index += *buf_size;
3134
        return -1;
3135
    }
3136

    
3137
    *buf_size=
3138
    pc->overread_index= pc->index + next;
3139
    
3140
    /* append to buffer */
3141
    if(pc->index){
3142
        pc->buffer= av_fast_realloc(pc->buffer, &pc->buffer_size, next + pc->index + FF_INPUT_BUFFER_PADDING_SIZE);
3143

    
3144
        memcpy(&pc->buffer[pc->index], *buf, next + FF_INPUT_BUFFER_PADDING_SIZE );
3145
        pc->index = 0;
3146
        *buf= pc->buffer;
3147
    }
3148

    
3149
    /* store overread bytes */
3150
    for(;next < 0; next++){
3151
        pc->state = (pc->state<<8) | pc->buffer[pc->last_index + next];
3152
        pc->overread++;
3153
    }
3154

    
3155
#if 0
3156
    if(pc->overread){
3157
        printf("overread %d, state:%X next:%d index:%d o_index:%d\n", pc->overread, pc->state, next, pc->index, pc->overread_index);
3158
        printf("%X %X %X %X\n", (*buf)[0], (*buf)[1],(*buf)[2],(*buf)[3]);
3159
    }
3160
#endif
3161

    
3162
    return 0;
3163
}
3164

    
3165
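/* release all internal/user picture buffers and reset the parse context */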
void ff_mpeg_flush(AVCodecContext *avctx){
3166
    int i;
3167
    MpegEncContext *s = avctx->priv_data;
3168
    
3169
    for(i=0; i<MAX_PICTURE_COUNT; i++){
3170
       if(s->picture[i].data[0] && (   s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
3171
                                    || s->picture[i].type == FF_BUFFER_TYPE_USER))
3172
        avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]);
3173
    }
3174
    s->last_picture_ptr = s->next_picture_ptr = NULL;
3175
    
3176
    s->parse_context.state= -1;
3177
    s->parse_context.frame_start_found= 0;
3178
    s->parse_context.overread= 0;
3179
    s->parse_context.overread_index= 0;
3180
    s->parse_context.index= 0;
3181
    s->parse_context.last_index= 0;
3182
}
3183

    
3184
#ifdef CONFIG_ENCODERS
3185
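/* copy 'length' bits from src into the PutBitContext, 16 bits at a time */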
void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length)
3186
{
3187
    int bytes= length>>4;
3188
    int bits= length&15;
3189
    int i;
3190

    
3191
    if(length==0) return;
3192

    
3193
    for(i=0; i<bytes; i++) put_bits(pb, 16, be2me_16(((uint16_t*)src)[i]));
3194
    put_bits(pb, bits, be2me_16(((uint16_t*)src)[i])>>(16-bits));
3195
}
3196

    
3197
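/* save the encoder state that encode_mb() modifies so several macroblock modes can be
 * tried for the same position; copy_context_after_encode() stores the winning state back */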
static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
3198
    int i;
3199

    
3200
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3201

    
3202
    /* mpeg1 */
3203
    d->mb_skip_run= s->mb_skip_run;
3204
    for(i=0; i<3; i++)
3205
        d->last_dc[i]= s->last_dc[i];
3206
    
3207
    /* statistics */
3208
    d->mv_bits= s->mv_bits;
3209
    d->i_tex_bits= s->i_tex_bits;
3210
    d->p_tex_bits= s->p_tex_bits;
3211
    d->i_count= s->i_count;
3212
    d->f_count= s->f_count;
3213
    d->b_count= s->b_count;
3214
    d->skip_count= s->skip_count;
3215
    d->misc_bits= s->misc_bits;
3216
    d->last_bits= 0;
3217

    
3218
    d->mb_skiped= 0;
3219
    d->qscale= s->qscale;
3220
}
3221

    
3222
static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
3223
    int i;
3224

    
3225
    memcpy(d->mv, s->mv, 2*4*2*sizeof(int)); 
3226
    memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
3227
    
3228
    /* mpeg1 */
3229
    d->mb_skip_run= s->mb_skip_run;
3230
    for(i=0; i<3; i++)
3231
        d->last_dc[i]= s->last_dc[i];
3232
    
3233
    /* statistics */
3234
    d->mv_bits= s->mv_bits;
3235
    d->i_tex_bits= s->i_tex_bits;
3236
    d->p_tex_bits= s->p_tex_bits;
3237
    d->i_count= s->i_count;
3238
    d->f_count= s->f_count;
3239
    d->b_count= s->b_count;
3240
    d->skip_count= s->skip_count;
3241
    d->misc_bits= s->misc_bits;
3242

    
3243
    d->mb_intra= s->mb_intra;
3244
    d->mb_skiped= s->mb_skiped;
3245
    d->mv_type= s->mv_type;
3246
    d->mv_dir= s->mv_dir;
3247
    d->pb= s->pb;
3248
    if(s->data_partitioning){
3249
        d->pb2= s->pb2;
3250
        d->tex_pb= s->tex_pb;
3251
    }
3252
    d->block= s->block;
3253
    for(i=0; i<6; i++)
3254
        d->block_last_index[i]= s->block_last_index[i];
3255
    d->interlaced_dct= s->interlaced_dct;
3256
    d->qscale= s->qscale;
3257
}
3258

    
3259
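/* encode the macroblock with the given mode into one of two ping-pong bit buffers,
 * compute its cost (bits, plus lambda weighted SSE when doing full RD mb_decision)
 * and keep it if it beats the best score found so far */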
static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, 
3260
                           PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
3261
                           int *dmin, int *next_block, int motion_x, int motion_y)
3262
{
3263
    int score;
3264
    uint8_t *dest_backup[3];
3265
    
3266
    copy_context_before_encode(s, backup, type);
3267

    
3268
    s->block= s->blocks[*next_block];
3269
    s->pb= pb[*next_block];
3270
    if(s->data_partitioning){
3271
        s->pb2   = pb2   [*next_block];
3272
        s->tex_pb= tex_pb[*next_block];
3273
    }
3274
    
3275
    if(*next_block){
3276
        memcpy(dest_backup, s->dest, sizeof(s->dest));
3277
        s->dest[0] = s->me.scratchpad;
3278
        s->dest[1] = s->me.scratchpad + 16;
3279
        s->dest[2] = s->me.scratchpad + 16 + 8;
3280
        assert(2*s->uvlinesize == s->linesize); //should be no problem for encoding
3281
        assert(s->linesize >= 64); //FIXME
3282
    }
3283

    
3284
    encode_mb(s, motion_x, motion_y);
3285
    
3286
    score= get_bit_count(&s->pb);
3287
    if(s->data_partitioning){
3288
        score+= get_bit_count(&s->pb2);
3289
        score+= get_bit_count(&s->tex_pb);
3290
    }
3291
   
3292
    if(s->avctx->mb_decision == FF_MB_DECISION_RD){
3293
        MPV_decode_mb(s, s->block);
3294

    
3295
        score *= s->lambda2;
3296
        score += sse_mb(s) << FF_LAMBDA_SHIFT;
3297
    }
3298
    
3299
    if(*next_block){
3300
        memcpy(s->dest, dest_backup, sizeof(s->dest));
3301
    }
3302

    
3303
    if(score<*dmin){
3304
        *dmin= score;
3305
        *next_block^=1;
3306

    
3307
        copy_context_after_encode(best, s, type);
3308
    }
3309
}
3310
                
3311
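/* sum of squared errors between two w x h blocks, using the dsputil
 * implementations for the common 16x16 and 8x8 cases */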
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
3312
    uint32_t *sq = squareTbl + 256;
3313
    int acc=0;
3314
    int x,y;
3315
    
3316
    if(w==16 && h==16) 
3317
        return s->dsp.sse[0](NULL, src1, src2, stride);
3318
    else if(w==8 && h==8)
3319
        return s->dsp.sse[1](NULL, src1, src2, stride);
3320
    
3321
    for(y=0; y<h; y++){
3322
        for(x=0; x<w; x++){
3323
            acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
3324
        } 
3325
    }
3326
    
3327
    assert(acc>=0);
3328
    
3329
    return acc;
3330
}
3331

    
3332
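/* SSE of the current macroblock against the source picture, clipped at the
 * right and bottom picture borders */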
static int sse_mb(MpegEncContext *s){
3333
    int w= 16;
3334
    int h= 16;
3335

    
3336
    if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3337
    if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3338

    
3339
    if(w==16 && h==16)
3340
        return  s->dsp.sse[0](NULL, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], s->linesize)
3341
               +s->dsp.sse[1](NULL, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], s->uvlinesize)
3342
               +s->dsp.sse[1](NULL, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], s->uvlinesize);
3343
    else
3344
        return  sse(s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
3345
               +sse(s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
3346
               +sse(s, s->new_picture.data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
3347
}
3348

    
3349
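/* encode one picture: run motion estimation (or measure block variance for I frames),
 * promote to an I frame on scene changes, choose f_code/b_code and fix long vectors,
 * run rate control, write the picture header and then encode every macroblock, trying
 * several modes per macroblock where more than one is possible */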
static void encode_picture(MpegEncContext *s, int picture_number)
3350
{
3351
    int mb_x, mb_y, pdif = 0;
3352
    int i;
3353
    int bits;
3354
    MpegEncContext best_s, backup_s;
3355
    uint8_t bit_buf[2][3000];
3356
    uint8_t bit_buf2[2][3000];
3357
    uint8_t bit_buf_tex[2][3000];
3358
    PutBitContext pb[2], pb2[2], tex_pb[2];
3359

    
3360
    for(i=0; i<2; i++){
3361
        init_put_bits(&pb    [i], bit_buf    [i], 3000);
3362
        init_put_bits(&pb2   [i], bit_buf2   [i], 3000);
3363
        init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000);
3364
    }
3365

    
3366
    s->picture_number = picture_number;
3367
    
3368
    /* Reset the average MB variance */
3369
    s->current_picture.mb_var_sum = 0;
3370
    s->current_picture.mc_mb_var_sum = 0;
3371

    
3372
#ifdef CONFIG_RISKY
3373
    /* we need to initialize some time vars before we can encode b-frames */
3374
    // RAL: Condition added for MPEG1VIDEO
3375
    if (s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->h263_msmpeg4))
3376
        ff_set_mpeg4_time(s, s->picture_number); 
3377
#endif
3378
        
3379
    s->scene_change_score=0;
3380
    
3381
    s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3382
    
3383
    if(s->pict_type==I_TYPE){
3384
        if(s->msmpeg4_version >= 3) s->no_rounding=1;
3385
        else                        s->no_rounding=0;
3386
    }else if(s->pict_type!=B_TYPE){
3387
        if(s->flipflop_rounding || s->codec_id == CODEC_ID_H263P || s->codec_id == CODEC_ID_MPEG4)
3388
            s->no_rounding ^= 1;          
3389
    }
3390
    
3391
    /* Estimate motion for every MB */
3392
    s->mb_intra=0; //for the rate distortion & bit compare functions
3393
    if(s->pict_type != I_TYPE){
3394
        if(s->pict_type != B_TYPE){
3395
            if((s->avctx->pre_me && s->last_non_b_pict_type==I_TYPE) || s->avctx->pre_me==2){
3396
                s->me.pre_pass=1;
3397
                s->me.dia_size= s->avctx->pre_dia_size;
3398

    
3399
                for(mb_y=s->mb_height-1; mb_y >=0 ; mb_y--) {
3400
                    for(mb_x=s->mb_width-1; mb_x >=0 ; mb_x--) {
3401
                        s->mb_x = mb_x;
3402
                        s->mb_y = mb_y;
3403
                        ff_pre_estimate_p_frame_motion(s, mb_x, mb_y);
3404
                    }
3405
                }
3406
                s->me.pre_pass=0;
3407
            }
3408
        }
3409

    
3410
        s->me.dia_size= s->avctx->dia_size;
3411
        for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3412
            s->block_index[0]= s->block_wrap[0]*(mb_y*2 + 1) - 1;
3413
            s->block_index[1]= s->block_wrap[0]*(mb_y*2 + 1);
3414
            s->block_index[2]= s->block_wrap[0]*(mb_y*2 + 2) - 1;
3415
            s->block_index[3]= s->block_wrap[0]*(mb_y*2 + 2);
3416
            for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3417
                s->mb_x = mb_x;
3418
                s->mb_y = mb_y;
3419
                s->block_index[0]+=2;
3420
                s->block_index[1]+=2;
3421
                s->block_index[2]+=2;
3422
                s->block_index[3]+=2;
3423
                
3424
                /* compute motion vector & mb_type and store in context */
3425
                if(s->pict_type==B_TYPE)
3426
                    ff_estimate_b_frame_motion(s, mb_x, mb_y);
3427
                else
3428
                    ff_estimate_p_frame_motion(s, mb_x, mb_y);
3429
            }
3430
        }
3431
    }else /* if(s->pict_type == I_TYPE) */{
3432
        /* I-Frame */
3433
        //FIXME do we need to zero them?
3434
        memset(s->motion_val[0], 0, sizeof(int16_t)*(s->mb_width*2 + 2)*(s->mb_height*2 + 2)*2);
3435
        memset(s->p_mv_table   , 0, sizeof(int16_t)*(s->mb_stride)*s->mb_height*2);
3436
        memset(s->mb_type      , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3437
        
3438
        if(!s->fixed_qscale){
3439
            /* finding spatial complexity for I-frame rate control */
3440
            for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3441
                for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3442
                    int xx = mb_x * 16;
3443
                    int yy = mb_y * 16;
3444
                    uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx;
3445
                    int varc;
3446
                    int sum = s->dsp.pix_sum(pix, s->linesize);
3447
    
3448
                    varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8;
3449

    
3450
                    s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
3451
                    s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
3452
                    s->current_picture.mb_var_sum    += varc;
3453
                }
3454
            }
3455
        }
3456
    }
3457
    emms_c();
3458

    
3459
    if(s->scene_change_score > s->avctx->scenechange_threshold && s->pict_type == P_TYPE){
3460
        s->pict_type= I_TYPE;
3461
        memset(s->mb_type   , MB_TYPE_INTRA, sizeof(uint8_t)*s->mb_stride*s->mb_height);
3462
//printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3463
    }
3464

    
3465
    if(!s->umvplus){
3466
        if(s->pict_type==P_TYPE || s->pict_type==S_TYPE) {
3467
            s->f_code= ff_get_best_fcode(s, s->p_mv_table, MB_TYPE_INTER);
3468
        
3469
            ff_fix_long_p_mvs(s);
3470
        }
3471

    
3472
        if(s->pict_type==B_TYPE){
3473
            int a, b;
3474

    
3475
            a = ff_get_best_fcode(s, s->b_forw_mv_table, MB_TYPE_FORWARD);
3476
            b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, MB_TYPE_BIDIR);
3477
            s->f_code = FFMAX(a, b);
3478

    
3479
            a = ff_get_best_fcode(s, s->b_back_mv_table, MB_TYPE_BACKWARD);
3480
            b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, MB_TYPE_BIDIR);
3481
            s->b_code = FFMAX(a, b);
3482

    
3483
            ff_fix_long_b_mvs(s, s->b_forw_mv_table, s->f_code, MB_TYPE_FORWARD);
3484
            ff_fix_long_b_mvs(s, s->b_back_mv_table, s->b_code, MB_TYPE_BACKWARD);
3485
            ff_fix_long_b_mvs(s, s->b_bidir_forw_mv_table, s->f_code, MB_TYPE_BIDIR);
3486
            ff_fix_long_b_mvs(s, s->b_bidir_back_mv_table, s->b_code, MB_TYPE_BIDIR);
3487
        }
3488
    }
3489
    
3490
    if (!s->fixed_qscale) 
3491
        s->current_picture.quality = ff_rate_estimate_qscale(s);
3492

    
3493
    if(s->adaptive_quant){
3494
#ifdef CONFIG_RISKY
3495
        switch(s->codec_id){
3496
        case CODEC_ID_MPEG4:
3497
            ff_clean_mpeg4_qscales(s);
3498
            break;
3499
        case CODEC_ID_H263:
3500
        case CODEC_ID_H263P:
3501
        case CODEC_ID_FLV1:
3502
            ff_clean_h263_qscales(s);
3503
            break;
3504
        }
3505
#endif
3506

    
3507
        s->lambda= s->lambda_table[0];
3508
        //FIXME broken
3509
    }else
3510
        s->lambda= s->current_picture.quality;
3511
//printf("%d %d\n", s->avctx->global_quality, s->current_picture.quality);
3512
    update_qscale(s);
3513
    
3514
    if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE && !(s->flags & CODEC_FLAG_QSCALE)) 
3515
        s->qscale= 3; //reduce clipping problems
3516
        
3517
    if (s->out_format == FMT_MJPEG) {
3518
        /* for mjpeg, we do include qscale in the matrix */
3519
        s->intra_matrix[0] = ff_mpeg1_default_intra_matrix[0];
3520
        for(i=1;i<64;i++){
3521
            int j= s->dsp.idct_permutation[i];
3522

    
3523
            s->intra_matrix[j] = CLAMP_TO_8BIT((ff_mpeg1_default_intra_matrix[i] * s->qscale) >> 3);
3524
        }
3525
        convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, 
3526
                       s->intra_matrix, s->intra_quant_bias, 8, 8);
3527
    }
3528
    
3529
    //FIXME var duplication
3530
    s->current_picture.key_frame= s->pict_type == I_TYPE;
3531
    s->current_picture.pict_type= s->pict_type;
3532

    
3533
    if(s->current_picture.key_frame)
3534
        s->picture_in_gop_number=0;
3535

    
3536
    s->last_bits= get_bit_count(&s->pb);
3537
    switch(s->out_format) {
3538
    case FMT_MJPEG:
3539
        mjpeg_picture_header(s);
3540
        break;
3541
#ifdef CONFIG_RISKY
3542
    case FMT_H263:
3543
        if (s->codec_id == CODEC_ID_WMV2) 
3544
            ff_wmv2_encode_picture_header(s, picture_number);
3545
        else if (s->h263_msmpeg4) 
3546
            msmpeg4_encode_picture_header(s, picture_number);
3547
        else if (s->h263_pred)
3548
            mpeg4_encode_picture_header(s, picture_number);
3549
        else if (s->h263_rv10) 
3550
            rv10_encode_picture_header(s, picture_number);
3551
        else if (s->codec_id == CODEC_ID_FLV1)
3552
            ff_flv_encode_picture_header(s, picture_number);
3553
        else
3554
            h263_encode_picture_header(s, picture_number);
3555
        break;
3556
#endif
3557
    case FMT_MPEG1:
3558
        mpeg1_encode_picture_header(s, picture_number);
3559
        break;
3560
    case FMT_H264:
3561
        break;
3562
    }
3563
    bits= get_bit_count(&s->pb);
3564
    s->header_bits= bits - s->last_bits;
3565
    s->last_bits= bits;
3566
    s->mv_bits=0;
3567
    s->misc_bits=0;
3568
    s->i_tex_bits=0;
3569
    s->p_tex_bits=0;
3570
    s->i_count=0;
3571
    s->f_count=0;
3572
    s->b_count=0;
3573
    s->skip_count=0;
3574

    
3575
    for(i=0; i<3; i++){
3576
        /* init last dc values */
3577
        /* note: quant matrix value (8) is implied here */
3578
        s->last_dc[i] = 128;
3579
        
3580
        s->current_picture_ptr->error[i] = 0;
3581
    }
3582
    s->mb_skip_run = 0;
3583
    s->last_mv[0][0][0] = 0;
3584
    s->last_mv[0][0][1] = 0;
3585
    s->last_mv[1][0][0] = 0;
3586
    s->last_mv[1][0][1] = 0;
3587
     
3588
    s->last_mv_dir = 0;
3589

    
3590
#ifdef CONFIG_RISKY
3591
    switch(s->codec_id){
3592
    case CODEC_ID_H263:
3593
    case CODEC_ID_H263P:
3594
    case CODEC_ID_FLV1:
3595
        s->gob_index = ff_h263_get_gob_height(s);
3596
        break;
3597
    case CODEC_ID_MPEG4:
3598
        if(s->partitioned_frame)
3599
            ff_mpeg4_init_partitions(s);
3600
        break;
3601
    }
3602
#endif
3603

    
3604
    s->resync_mb_x=0;
3605
    s->resync_mb_y=0;
3606
    s->first_slice_line = 1;
3607
    s->ptr_lastgob = s->pb.buf;
3608
    for(mb_y=0; mb_y < s->mb_height; mb_y++) {
3609
        s->mb_x=0;
3610
        s->mb_y= mb_y;
3611

    
3612
        s->y_dc_scale= s->y_dc_scale_table[ s->qscale ];
3613
        s->c_dc_scale= s->c_dc_scale_table[ s->qscale ];
3614
        ff_init_block_index(s);
3615
        
3616
        for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3617
            const int xy= mb_y*s->mb_stride + mb_x;
3618
            int mb_type= s->mb_type[xy];
3619
//            int d;
3620
            int dmin= INT_MAX;
3621

    
3622
            s->mb_x = mb_x;
3623
            ff_update_block_index(s);
3624

    
3625
            /* write gob / video packet header  */
3626
#ifdef CONFIG_RISKY
3627
            if(s->rtp_mode && mb_y + mb_x>0){
3628
                int current_packet_size, is_gob_start;
3629
                
3630
                current_packet_size= pbBufPtr(&s->pb) - s->ptr_lastgob;
3631
                is_gob_start=0;
3632
                
3633
                if(s->codec_id==CODEC_ID_MPEG4){
3634
                    if(current_packet_size >= s->rtp_payload_size){
3635

    
3636
                        if(s->partitioned_frame){
3637
                            ff_mpeg4_merge_partitions(s);
3638
                            ff_mpeg4_init_partitions(s);
3639
                        }
3640
                        ff_mpeg4_encode_video_packet_header(s);
3641

    
3642
                        if(s->flags&CODEC_FLAG_PASS1){
3643
                            int bits= get_bit_count(&s->pb);
3644
                            s->misc_bits+= bits - s->last_bits;
3645
                            s->last_bits= bits;
3646
                        }
3647
                        ff_mpeg4_clean_buffers(s);
3648
                        is_gob_start=1;
3649
                    }
3650
                }else if(s->codec_id==CODEC_ID_MPEG1VIDEO){
3651
                    if(   current_packet_size >= s->rtp_payload_size 
3652
                       && s->mb_skip_run==0){
3653
                        ff_mpeg1_encode_slice_header(s);
3654
                        ff_mpeg1_clean_buffers(s);
3655
                        is_gob_start=1;
3656
                    }
3657
                }else if(s->codec_id==CODEC_ID_MPEG2VIDEO){
3658
                    if(   (   current_packet_size >= s->rtp_payload_size || mb_x==0)
3659
                       && s->mb_skip_run==0){
3660
                        ff_mpeg1_encode_slice_header(s);
3661
                        ff_mpeg1_clean_buffers(s);
3662
                        is_gob_start=1;
3663
                    }
3664
                }else{
3665
                    if(current_packet_size >= s->rtp_payload_size
3666
                       && s->mb_x==0 && s->mb_y%s->gob_index==0){
3667
                       
3668
                        h263_encode_gob_header(s, mb_y);                       
3669
                        is_gob_start=1;
3670
                    }
3671
                }
3672

    
3673
                if(is_gob_start){
3674
                    s->ptr_lastgob = pbBufPtr(&s->pb);
3675
                    s->first_slice_line=1;
3676
                    s->resync_mb_x=mb_x;
3677
                    s->resync_mb_y=mb_y;
3678
                }
3679
            }
3680
#endif
3681

    
3682
            if(  (s->resync_mb_x   == s->mb_x)
3683
               && s->resync_mb_y+1 == s->mb_y){
3684
                s->first_slice_line=0; 
3685
            }
3686

    
3687
            s->mb_skiped=0;
3688

    
3689
            if(mb_type & (mb_type-1)){ // more than 1 MB type possible
3690
                int next_block=0;
3691
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3692

    
3693
                copy_context_before_encode(&backup_s, s, -1);
3694
                backup_s.pb= s->pb;
3695
                best_s.data_partitioning= s->data_partitioning;
3696
                best_s.partitioned_frame= s->partitioned_frame;
3697
                if(s->data_partitioning){
3698
                    backup_s.pb2= s->pb2;
3699
                    backup_s.tex_pb= s->tex_pb;
3700
                }
3701

    
3702
                if(mb_type&MB_TYPE_INTER){
3703
                    s->mv_dir = MV_DIR_FORWARD;
3704
                    s->mv_type = MV_TYPE_16X16;
3705
                    s->mb_intra= 0;
3706
                    s->mv[0][0][0] = s->p_mv_table[xy][0];
3707
                    s->mv[0][0][1] = s->p_mv_table[xy][1];
3708
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER, pb, pb2, tex_pb, 
3709
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3710
                }
3711
                if(mb_type&MB_TYPE_SKIPED){
3712
                    s->mv_dir = MV_DIR_FORWARD;
3713
                    s->mv_type = MV_TYPE_16X16;
3714
                    s->mb_intra= 0;
3715
                    s->mv[0][0][0] = 0;
3716
                    s->mv[0][0][1] = 0;
3717
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_SKIPED, pb, pb2, tex_pb, 
3718
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3719
                }
3720
                if(mb_type&MB_TYPE_INTER4V){                 
3721
                    s->mv_dir = MV_DIR_FORWARD;
3722
                    s->mv_type = MV_TYPE_8X8;
3723
                    s->mb_intra= 0;
3724
                    for(i=0; i<4; i++){
3725
                        s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3726
                        s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3727
                    }
3728
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTER4V, pb, pb2, tex_pb, 
3729
                                 &dmin, &next_block, 0, 0);
3730
                }
3731
                if(mb_type&MB_TYPE_FORWARD){
3732
                    s->mv_dir = MV_DIR_FORWARD;
3733
                    s->mv_type = MV_TYPE_16X16;
3734
                    s->mb_intra= 0;
3735
                    s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3736
                    s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3737
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_FORWARD, pb, pb2, tex_pb, 
3738
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3739
                }
3740
                if(mb_type&MB_TYPE_BACKWARD){
3741
                    s->mv_dir = MV_DIR_BACKWARD;
3742
                    s->mv_type = MV_TYPE_16X16;
3743
                    s->mb_intra= 0;
3744
                    s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3745
                    s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3746
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BACKWARD, pb, pb2, tex_pb, 
3747
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3748
                }
3749
                if(mb_type&MB_TYPE_BIDIR){
3750
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3751
                    s->mv_type = MV_TYPE_16X16;
3752
                    s->mb_intra= 0;
3753
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3754
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3755
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3756
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3757
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_BIDIR, pb, pb2, tex_pb, 
3758
                                 &dmin, &next_block, 0, 0);
3759
                }
3760
                if(mb_type&MB_TYPE_DIRECT){
3761
                    int mx= s->b_direct_mv_table[xy][0];
3762
                    int my= s->b_direct_mv_table[xy][1];
3763
                    
3764
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3765
                    s->mb_intra= 0;
3766
#ifdef CONFIG_RISKY
3767
                    ff_mpeg4_set_direct_mv(s, mx, my);
3768
#endif
3769
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_DIRECT, pb, pb2, tex_pb, 
3770
                                 &dmin, &next_block, mx, my);
3771
                }
3772
                if(mb_type&MB_TYPE_INTRA){
3773
                    s->mv_dir = 0;
3774
                    s->mv_type = MV_TYPE_16X16;
3775
                    s->mb_intra= 1;
3776
                    s->mv[0][0][0] = 0;
3777
                    s->mv[0][0][1] = 0;
3778
                    encode_mb_hq(s, &backup_s, &best_s, MB_TYPE_INTRA, pb, pb2, tex_pb, 
3779
                                 &dmin, &next_block, 0, 0);
3780
                    if(s->h263_pred || s->h263_aic){
3781
                        if(best_s.mb_intra)
3782
                            s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3783
                        else
3784
                            ff_clean_intra_table_entries(s); //old mode?
3785
                    }
3786
                }
3787
                copy_context_after_encode(s, &best_s, -1);
3788
                
3789
                pb_bits_count= get_bit_count(&s->pb);
3790
                flush_put_bits(&s->pb);
3791
                ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3792
                s->pb= backup_s.pb;
3793
                
3794
                if(s->data_partitioning){
3795
                    pb2_bits_count= get_bit_count(&s->pb2);
3796
                    flush_put_bits(&s->pb2);
3797
                    ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3798
                    s->pb2= backup_s.pb2;
3799
                    
3800
                    tex_pb_bits_count= get_bit_count(&s->tex_pb);
3801
                    flush_put_bits(&s->tex_pb);
3802
                    ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3803
                    s->tex_pb= backup_s.tex_pb;
3804
                }
3805
                s->last_bits= get_bit_count(&s->pb);
3806
               
3807
#ifdef CONFIG_RISKY
3808
                if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3809
                    ff_h263_update_motion_val(s);
3810
#endif
3811
        
3812
                if(next_block==0){
3813
                    s->dsp.put_pixels_tab[0][0](s->dest[0], s->me.scratchpad     , s->linesize  ,16);
3814
                    s->dsp.put_pixels_tab[1][0](s->dest[1], s->me.scratchpad + 16, s->uvlinesize, 8);
3815
                    s->dsp.put_pixels_tab[1][0](s->dest[2], s->me.scratchpad + 24, s->uvlinesize, 8);
3816
                }
3817

    
3818
                if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3819
                    MPV_decode_mb(s, s->block);
3820
            } else {
3821
                int motion_x, motion_y;
3822
                int intra_score;
3823
                int inter_score= s->current_picture.mb_cmp_score[mb_x + mb_y*s->mb_stride];
3824
                
3825
              if(s->avctx->mb_decision==FF_MB_DECISION_SIMPLE && s->pict_type==P_TYPE){ //FIXME check if the mess is useful at all
3826
                /* get luma score */
3827
                if((s->avctx->mb_cmp&0xFF)==FF_CMP_SSE){
3828
                    intra_score= (s->current_picture.mb_var[mb_x + mb_y*s->mb_stride]<<8) - 500; //FIXME don't scale it down so we don't have to fix it
3829
                }else{
3830
                    uint8_t *dest_y;
3831

    
3832
                    int mean= s->current_picture.mb_mean[mb_x + mb_y*s->mb_stride]; //FIXME
3833
                    mean*= 0x01010101;
3834
                    
3835
                    dest_y  = s->new_picture.data[0] + (mb_y * 16 * s->linesize    ) + mb_x * 16;
3836
                
3837
                    for(i=0; i<16; i++){
3838
                        *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 0]) = mean;
3839
                        *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 4]) = mean;
3840
                        *(uint32_t*)(&s->me.scratchpad[i*s->linesize+ 8]) = mean;
3841
                        *(uint32_t*)(&s->me.scratchpad[i*s->linesize+12]) = mean;
3842
                    }
3843

    
3844
                    s->mb_intra=1;
3845
                    intra_score= s->dsp.mb_cmp[0](s, s->me.scratchpad, dest_y, s->linesize);
3846
                                        
3847
/*                    printf("intra:%7d inter:%7d var:%7d mc_var.%7d\n", intra_score>>8, inter_score>>8, 
3848
                        s->current_picture.mb_var[mb_x + mb_y*s->mb_stride],
3849
                        s->current_picture.mc_mb_var[mb_x + mb_y*s->mb_stride]);*/
3850
                }
3851
                
3852
                /* get chroma score */
3853
                if(s->avctx->mb_cmp&FF_CMP_CHROMA){
3854
                    int i;
3855
                    
3856
                    s->mb_intra=1;
3857
                    for(i=1; i<3; i++){
3858
                        uint8_t *dest_c;
3859
                        int mean;
3860
                        
3861
                        if(s->out_format == FMT_H263){
3862
                            mean= (s->dc_val[i][mb_x + (mb_y+1)*(s->mb_width+2)] + 4)>>3; //FIXME not exact but simple ;)
3863
                        }else{
3864
                            mean= (s->last_dc[i] + 4)>>3;
3865
                        }
3866
                        dest_c = s->new_picture.data[i] + (mb_y * 8  * (s->uvlinesize)) + mb_x * 8;
3867
                        
3868
                        mean*= 0x01010101;
3869
                        for(i=0; i<8; i++){
3870
                            *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 0]) = mean;
3871
                            *(uint32_t*)(&s->me.scratchpad[i*s->uvlinesize+ 4]) = mean;
3872
                        }
3873
                        
3874
                        intra_score+= s->dsp.mb_cmp[1](s, s->me.scratchpad, dest_c, s->uvlinesize);
3875
                    }                
3876
                }
3877

    
3878
                /* bias */
3879
                switch(s->avctx->mb_cmp&0xFF){
3880
                default:
3881
                case FF_CMP_SAD:
3882
                    intra_score+= 32*s->qscale;
3883
                    break;
3884
                case FF_CMP_SSE:
3885
                    intra_score+= 24*s->qscale*s->qscale;
3886
                    break;
3887
                case FF_CMP_SATD:
3888
                    intra_score+= 96*s->qscale;
3889
                    break;
3890
                case FF_CMP_DCT:
3891
                    intra_score+= 48*s->qscale;
3892
                    break;
3893
                case FF_CMP_BIT:
3894
                    intra_score+= 16;
3895
                    break;
3896
                case FF_CMP_PSNR:
3897
                case FF_CMP_RD:
3898
                    intra_score+= (s->qscale*s->qscale*109*8 + 64)>>7;
3899
                    break;
3900
                }
3901

    
3902
                if(intra_score < inter_score)
3903
                    mb_type= MB_TYPE_INTRA;
3904
              }  
3905
                
3906
                s->mv_type=MV_TYPE_16X16;
3907
                // only one MB-Type possible
3908
                
3909
                switch(mb_type){
3910
                case MB_TYPE_INTRA:
3911
                    s->mv_dir = 0;
3912
                    s->mb_intra= 1;
3913
                    motion_x= s->mv[0][0][0] = 0;
3914
                    motion_y= s->mv[0][0][1] = 0;
3915
                    break;
3916
                case MB_TYPE_INTER:
3917
                    s->mv_dir = MV_DIR_FORWARD;
3918
                    s->mb_intra= 0;
3919
                    motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3920
                    motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3921
                    break;
3922
                case MB_TYPE_INTER4V:
3923
                    s->mv_dir = MV_DIR_FORWARD;
3924
                    s->mv_type = MV_TYPE_8X8;
3925
                    s->mb_intra= 0;
3926
                    for(i=0; i<4; i++){
3927
                        s->mv[0][i][0] = s->motion_val[s->block_index[i]][0];
3928
                        s->mv[0][i][1] = s->motion_val[s->block_index[i]][1];
3929
                    }
3930
                    motion_x= motion_y= 0;
3931
                    break;
3932
                case MB_TYPE_DIRECT:
3933
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3934
                    s->mb_intra= 0;
3935
                    motion_x=s->b_direct_mv_table[xy][0];
3936
                    motion_y=s->b_direct_mv_table[xy][1];
3937
#ifdef CONFIG_RISKY
3938
                    ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3939
#endif
3940
                    break;
3941
                case MB_TYPE_BIDIR:
3942
                    s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3943
                    s->mb_intra= 0;
3944
                    motion_x=0;
3945
                    motion_y=0;
3946
                    s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3947
                    s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3948
                    s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3949
                    s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3950
                    break;
3951
                case MB_TYPE_BACKWARD:
3952
                    s->mv_dir = MV_DIR_BACKWARD;
3953
                    s->mb_intra= 0;
3954
                    motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3955
                    motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3956
                    break;
3957
                case MB_TYPE_FORWARD:
3958
                    s->mv_dir = MV_DIR_FORWARD;
3959
                    s->mb_intra= 0;
3960
                    motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3961
                    motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3962
//                    printf(" %d %d ", motion_x, motion_y);
3963
                    break;
3964
                default:
3965
                    motion_x=motion_y=0; //gcc warning fix
3966
                    printf("illegal MB type\n");
3967
                }
3968

    
3969
                encode_mb(s, motion_x, motion_y);
3970

    
3971
                // RAL: Update last macroblock type
3972
                s->last_mv_dir = s->mv_dir;
3973
            
3974
#ifdef CONFIG_RISKY
3975
                if (s->out_format == FMT_H263 && s->pict_type!=B_TYPE)
3976
                    ff_h263_update_motion_val(s);
3977
#endif
3978
                
3979
                MPV_decode_mb(s, s->block);
3980
            }
3981

    
3982
            /* clean the MV table in I/P/S frames for direct mode in B frames */
3983
            if(s->mb_intra /* && I,P,S_TYPE */){
3984
                s->p_mv_table[xy][0]=0;
3985
                s->p_mv_table[xy][1]=0;
3986
            }
3987
            
3988
            if(s->flags&CODEC_FLAG_PSNR){
3989
                int w= 16;
3990
                int h= 16;
3991

    
3992
                if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3993
                if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3994

    
3995
                s->current_picture_ptr->error[0] += sse(
3996
                    s, s->new_picture.data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3997
                    s->dest[0], w, h, s->linesize);
3998
                s->current_picture_ptr->error[1] += sse(
3999
                    s, s->new_picture.data[1] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
4000
                    s->dest[1], w>>1, h>>1, s->uvlinesize);
4001
                s->current_picture_ptr->error[2] += sse(
4002
                    s, s->new_picture    .data[2] + s->mb_x*8  + s->mb_y*s->uvlinesize*8,
4003
                    s->dest[2], w>>1, h>>1, s->uvlinesize);
4004
            }
4005
//printf("MB %d %d bits\n", s->mb_x+s->mb_y*s->mb_stride, get_bit_count(&s->pb));
4006
        }
4007
    }
4008
    emms_c();
4009

    
4010
#ifdef CONFIG_RISKY
4011
    if(s->codec_id==CODEC_ID_MPEG4 && s->partitioned_frame)
4012
        ff_mpeg4_merge_partitions(s);
4013

    
4014
    if (s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == I_TYPE)
4015
        msmpeg4_encode_ext_header(s);
4016

    
4017
    if(s->codec_id==CODEC_ID_MPEG4) 
4018
        ff_mpeg4_stuffing(&s->pb);
4019
#endif
4020

    
4021
    //if (s->gob_number)
4022
    //    fprintf(stderr,"\nNumber of GOB: %d", s->gob_number);
4023
    
4024
    /* Send the last GOB if RTP */    
4025
    if (s->rtp_mode) {
4026
        flush_put_bits(&s->pb);
4027
        pdif = pbBufPtr(&s->pb) - s->ptr_lastgob;
4028
        /* Call the RTP callback to send the last GOB */
4029
        if (s->rtp_callback)
4030
            s->rtp_callback(s->ptr_lastgob, pdif, s->gob_number);
4031
        s->ptr_lastgob = pbBufPtr(&s->pb);
4032
        //fprintf(stderr,"\nGOB: %2d size: %d (last)", s->gob_number, pdif);
4033
    }
4034
}
4035

    
4036
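/* rate distortion optimized quantization: after the forward DCT, try the two nearest
 * quantized levels (or zero) for each coefficient and pick, with a dynamic programming
 * search over run/level pairs, the combination minimizing distortion + lambda * bits */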
static int dct_quantize_trellis_c(MpegEncContext *s, 
4037
                        DCTELEM *block, int n,
4038
                        int qscale, int *overflow){
4039
    const int *qmat;
4040
    const uint8_t *scantable= s->intra_scantable.scantable;
4041
    int max=0;
4042
    unsigned int threshold1, threshold2;
4043
    int bias=0;
4044
    int run_tab[65];
4045
    int level_tab[65];
4046
    int score_tab[65];
4047
    int last_run=0;
4048
    int last_level=0;
4049
    int last_score= 0;
4050
    int last_i= 0;
4051
    int not_coded_score= 0;
4052
    int coeff[3][64];
4053
    int coeff_count[64];
4054
    int qmul, qadd, start_i, last_non_zero, i, dc;
4055
    const int esc_length= s->ac_esc_length;
4056
    uint8_t * length;
4057
    uint8_t * last_length;
4058
    int score_limit=0;
4059
    int left_limit= 0;
4060
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
4061
    const int patch_table= s->out_format == FMT_MPEG1 && !s->mb_intra;
4062
        
4063
    s->dsp.fdct (block);
4064

    
4065
    qmul= qscale*16;
4066
    qadd= ((qscale-1)|1)*8;
4067

    
4068
    if (s->mb_intra) {
4069
        int q;
4070
        if (!s->h263_aic) {
4071
            if (n < 4)
4072
                q = s->y_dc_scale;
4073
            else
4074
                q = s->c_dc_scale;
4075
            q = q << 3;
4076
        } else{
4077
            /* For AIC we skip quant/dequant of INTRADC */
4078
            q = 1 << 3;
4079
            qadd=0;
4080
        }
4081
            
4082
        /* note: block[0] is assumed to be positive */
4083
        block[0] = (block[0] + (q >> 1)) / q;
4084
        start_i = 1;
4085
        last_non_zero = 0;
4086
        qmat = s->q_intra_matrix[qscale];
4087
        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4088
            bias= 1<<(QMAT_SHIFT-1);
4089
        length     = s->intra_ac_vlc_length;
4090
        last_length= s->intra_ac_vlc_last_length;
4091
    } else {
4092
        start_i = 0;
4093
        last_non_zero = -1;
4094
        qmat = s->q_inter_matrix[qscale];
4095
        length     = s->inter_ac_vlc_length;
4096
        last_length= s->inter_ac_vlc_last_length;
4097
    }
4098

    
4099
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
4100
    threshold2= (threshold1<<1);
4101

    
4102
    for(i=start_i; i<64; i++) {
4103
        const int j = scantable[i];
4104
        const int k= i-start_i;
4105
        int level = block[j];
4106
        level = level * qmat[j];
4107

    
4108
//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
4109
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
4110
        if(((unsigned)(level+threshold1))>threshold2){
4111
            if(level>0){
4112
                level= (bias + level)>>QMAT_SHIFT;
4113
                coeff[0][k]= level;
4114
                coeff[1][k]= level-1;
4115
//                coeff[2][k]= level-2;
4116
            }else{
4117
                level= (bias - level)>>QMAT_SHIFT;
4118
                coeff[0][k]= -level;
4119
                coeff[1][k]= -level+1;
4120
//                coeff[2][k]= -level+2;
4121
            }
4122
            coeff_count[k]= FFMIN(level, 2);
4123
            assert(coeff_count[k]);
4124
            max |=level;
4125
            last_non_zero = i;
4126
        }else{
4127
            coeff[0][k]= (level>>31)|1;
4128
            coeff_count[k]= 1;
4129
        }
4130
    }
4131
    
4132
    *overflow= s->max_qcoeff < max; //overflow might have happened
4133
    
4134
    if(last_non_zero < start_i){
4135
        memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));
4136
        return last_non_zero;
4137
    }
    score_tab[0]= 0;

    if(patch_table){
//        length[UNI_AC_ENC_INDEX(0, 63)]=
//        length[UNI_AC_ENC_INDEX(0, 65)]= 2;
    }

    for(i=0; i<=last_non_zero - start_i; i++){
        int level_index, run, j;
        const int dct_coeff= block[ scantable[i + start_i] ];
        const int zero_distoration= dct_coeff*dct_coeff;
        int best_score=256*256*256*120;

        last_score += zero_distoration;
        not_coded_score += zero_distoration;
        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distoration;
            int level= coeff[level_index][i];
            int unquant_coeff;

            assert(level);

            if(s->out_format == FMT_H263){
                if(level>0){
                    unquant_coeff= level*qmul + qadd;
                }else{
                    unquant_coeff= level*qmul - qadd;
                }
            }else{ //MPEG1
                j= s->dsp.idct_permutation[ scantable[i + start_i] ]; //FIXME optimize
                if(s->mb_intra){
                    if (level < 0) {
                        unquant_coeff = (int)((-level) * qscale * s->intra_matrix[j]) >> 3;
                        unquant_coeff = -((unquant_coeff - 1) | 1);
                    } else {
                        unquant_coeff = (int)(  level  * qscale * s->intra_matrix[j]) >> 3;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                    }
                }else{
                    if (level < 0) {
                        unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                        unquant_coeff = -((unquant_coeff - 1) | 1);
                    } else {
                        unquant_coeff = (((  level  << 1) + 1) * qscale * ((int) s->inter_matrix[j])) >> 4;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                    }
                }
                unquant_coeff<<= 3;
            }

            distoration= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff);
            level+=64;
            if((level&(~127)) == 0){
                for(run=0; run<=i - left_limit; run++){
                    int score= distoration + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score=
                        score_tab[i+1]= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(run=0; run<=i - left_limit; run++){
                        int score= distoration + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                distoration += esc_length*lambda;
                for(run=0; run<=i - left_limit; run++){
                    int score= distoration + score_tab[i-run];

                    if(score < best_score){
                        best_score=
                        score_tab[i+1]= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263){
                    for(run=0; run<=i - left_limit; run++){
                        int score= distoration + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        for(j=left_limit; j<=i; j++){
            score_tab[j] += zero_distoration;
        }
        score_limit+= zero_distoration;
        if(score_tab[i+1] < score_limit)
            score_limit= score_tab[i+1];

        //Note: there is a vlc code in mpeg4 which is 1 bit shorter than another one with a shorter run and the same level
        while(score_tab[ left_limit ] > score_limit + lambda) left_limit++;

        if(patch_table){
//            length[UNI_AC_ENC_INDEX(0, 63)]=
//            length[UNI_AC_ENC_INDEX(0, 65)]= 3;
        }
    }
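
    /* for non-H.263 formats the best truncation point is picked here:
       score_tab[i] is the cost of keeping the first i coefficients, and the
       lambda*2 term is a rough estimate of the end-of-block cost (i==0 means
       coding nothing at all). */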
    if(s->out_format != FMT_H263){
        last_score= 256*256*256*120;
        for(i= left_limit; i<=last_non_zero - start_i + 1; i++){
            int score= score_tab[i];
            if(i) score += lambda*2; //FIXME exacter?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score - not_coded_score;

    dc= block[0];
    last_non_zero= last_i - 1 + start_i;
    memset(block + start_i, 0, (64-start_i)*sizeof(DCTELEM));

    if(last_non_zero < start_i)
        return last_non_zero;
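
    /* special case: only the first scan position of an inter block survived;
       re-evaluate its candidate levels against leaving the block uncoded. */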
    if(last_non_zero == 0 && start_i == 0){
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int unquant_coeff, score, distoration;

            if(s->out_format == FMT_H263){
                if(level>0){
                    unquant_coeff= (level*qmul + qadd)>>3;
                }else{
                    unquant_coeff= (level*qmul - qadd)>>3;
                }
            }else{ //MPEG1
                    if (level < 0) {
                        unquant_coeff = ((((-level) << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                        unquant_coeff = -((unquant_coeff - 1) | 1);
                    } else {
                        unquant_coeff = (((  level  << 1) + 1) * qscale * ((int) s->inter_matrix[0])) >> 4;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                    }
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distoration= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0)
                score= distoration + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else
                score= distoration + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }
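
    /* walk the chosen path backwards through run_tab/level_tab and write the
       surviving levels into the block at their IDCT-permuted positions. */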
    i= last_i;
    assert(last_level);
//FIXME use permutated scantable
    block[ s->dsp.idct_permutation[ scantable[last_non_zero] ] ]= last_level;
    i -= last_run + 1;

    for(;i>0 ; i -= run_tab[i] + 1){
        const int j= s->dsp.idct_permutation[ scantable[i - 1 + start_i] ];

        block[j]= level_tab[i];
        assert(block[j]);
    }

    return last_non_zero;
}
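
/* plain scalar quantizer (no rate-distortion search): forward DCT, then a
   per-coefficient multiply by the precomputed quantization matrix with bias
   and shift. */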
static int dct_quantize_c(MpegEncContext *s,
                        DCTELEM *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q;
    const int *qmat;
    const uint8_t *scantable= s->intra_scantable.scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->dsp.fdct (block);

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        i = 1;
        last_non_zero = 0;
        qmat = s->q_intra_matrix[qscale];
        bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
    } else {
        i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT);
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    for(;i<64;i++) {
        j = scantable[i];
        level = block[j];
        level = level * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
            last_non_zero = i;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* apply the IDCT permutation; only the !=0 elements need to be permuted */
    if (s->dsp.idct_permutation_type != FF_NO_IDCT_PERM)
        ff_block_permute(block, s->dsp.idct_permutation, scantable, last_non_zero);

    return last_non_zero;
}

#endif //CONFIG_ENCODERS
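
/* MPEG-1 style inverse quantization; the "(level - 1) | 1" forces the
   reconstructed levels to be odd (MPEG-1 mismatch control). */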
static void dct_unquantize_mpeg1_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    nCoeffs= s->block_last_index[n];

    if (s->mb_intra) {
        if (n < 4)
            block[0] = block[0] * s->y_dc_scale;
        else
            block[0] = block[0] * s->c_dc_scale;
        /* XXX: only mpeg1 */
        quant_matrix = s->intra_matrix;
        for(i=1;i<=nCoeffs;i++) {
            int j= s->intra_scantable.permutated[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                    level = (level - 1) | 1;
                    level = -level;
                } else {
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                    level = (level - 1) | 1;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
            }
        }
    } else {
        i = 0;
        quant_matrix = s->inter_matrix;
        for(;i<=nCoeffs;i++) {
            int j= s->intra_scantable.permutated[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                    level = (level - 1) | 1;
                    level = -level;
                } else {
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                    level = (level - 1) | 1;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
            }
        }
    }
}
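
/* MPEG-2 style inverse quantization: no per-coefficient oddification;
   instead, for inter blocks, the parity of the coefficient sum is folded
   into block[63] (MPEG-2 mismatch control). */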
static void dct_unquantize_mpeg2_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale)
{
    int i, level, nCoeffs;
    const uint16_t *quant_matrix;

    if(s->alternate_scan) nCoeffs= 63;
    else nCoeffs= s->block_last_index[n];

    if (s->mb_intra) {
        if (n < 4)
            block[0] = block[0] * s->y_dc_scale;
        else
            block[0] = block[0] * s->c_dc_scale;
        quant_matrix = s->intra_matrix;
        for(i=1;i<=nCoeffs;i++) {
            int j= s->intra_scantable.permutated[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                    level = -level;
                } else {
                    level = (int)(level * qscale * quant_matrix[j]) >> 3;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
            }
        }
    } else {
        int sum=-1;
        i = 0;
        quant_matrix = s->inter_matrix;
        for(;i<=nCoeffs;i++) {
            int j= s->intra_scantable.permutated[i];
            level = block[j];
            if (level) {
                if (level < 0) {
                    level = -level;
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                    level = -level;
                } else {
                    level = (((level << 1) + 1) * qscale *
                             ((int) (quant_matrix[j]))) >> 4;
                }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
                block[j] = level;
                sum+=level;
            }
        }
        block[63]^=sum&1;
    }
}
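
/* H.263 style inverse quantization: level*2*qscale plus an odd offset,
   without any quantization matrix. */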
static void dct_unquantize_h263_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale)
{
    int i, level, qmul, qadd;
    int nCoeffs;

    assert(s->block_last_index[n]>=0);

    qadd = (qscale - 1) | 1;
    qmul = qscale << 1;

    if (s->mb_intra) {
        if (!s->h263_aic) {
            if (n < 4)
                block[0] = block[0] * s->y_dc_scale;
            else
                block[0] = block[0] * s->c_dc_scale;
        }else
            qadd = 0;
        i = 1;
        nCoeffs= 63; //does not always use zigzag table
    } else {
        i = 0;
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
    }

    for(;i<=nCoeffs;i++) {
        level = block[i];
        if (level) {
            if (level < 0) {
                level = level * qmul - qadd;
            } else {
                level = level * qmul + qadd;
            }
#ifdef PARANOID
                if (level < -2048 || level > 2047)
                    fprintf(stderr, "unquant error %d %d\n", i, level);
#endif
            block[i] = level;
        }
    }
}
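
/* option table for the MPEG-4 (and related) encoders; each entry binds an
   option name to an AVCodecContext field together with its range and
   default value. */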
static const AVOption mpeg4_options[] =
{
    AVOPTION_CODEC_INT("bitrate", "desired video bitrate", bit_rate, 4, 240000000, 800000),
    AVOPTION_CODEC_INT("ratetol", "number of bits the bitstream is allowed to diverge from the reference; "
                       "the reference can be CBR (for CBR pass1) or VBR (for pass2)",
                       bit_rate_tolerance, 4, 240000000, 8000),
    AVOPTION_CODEC_INT("qmin", "minimum quantizer", qmin, 1, 31, 2),
    AVOPTION_CODEC_INT("qmax", "maximum quantizer", qmax, 1, 31, 31),
    AVOPTION_CODEC_STRING("rc_eq", "rate control equation",
                          rc_eq, "tex^qComp,option1,options2", 0),
    AVOPTION_CODEC_INT("rc_minrate", "rate control minimum bitrate",
                       rc_min_rate, 4, 24000000, 0),
    AVOPTION_CODEC_INT("rc_maxrate", "rate control maximum bitrate",
                       rc_max_rate, 4, 24000000, 0),
    AVOPTION_CODEC_DOUBLE("rc_buf_aggresivity", "rate control buffer aggressivity",
                          rc_buffer_aggressivity, 4, 24000000, 0),
    AVOPTION_CODEC_DOUBLE("rc_initial_cplx", "initial complexity for pass1 ratecontrol",
                          rc_initial_cplx, 0., 9999999., 0),
    AVOPTION_CODEC_DOUBLE("i_quant_factor", "qscale factor between p and i frames",
                          i_quant_factor, 0., 0., 0),
    AVOPTION_CODEC_DOUBLE("i_quant_offset", "qscale offset between p and i frames",
                          i_quant_offset, -999999., 999999., 0),
    AVOPTION_CODEC_INT("dct_algo", "dct algorithm",
                       dct_algo, 0, 5, 0), // fixme - "Auto,FastInt,Int,MMX,MLib,Altivec"
    AVOPTION_CODEC_DOUBLE("lumi_masking", "luminance masking",
                          lumi_masking, 0., 999999., 0),
    AVOPTION_CODEC_DOUBLE("temporal_cplx_masking", "temporal complexity masking",
                          temporal_cplx_masking, 0., 999999., 0),
    AVOPTION_CODEC_DOUBLE("spatial_cplx_masking", "spatial complexity masking",
                          spatial_cplx_masking, 0., 999999., 0),
    AVOPTION_CODEC_DOUBLE("p_masking", "p block masking",
                          p_masking, 0., 999999., 0),
    AVOPTION_CODEC_DOUBLE("dark_masking", "darkness masking",
                          dark_masking, 0., 999999., 0),
    AVOPTION_CODEC_INT("idct_algo", "idct algorithm",
                       idct_algo, 0, 8, 0), // fixme - "Auto,Int,Simple,SimpleMMX,LibMPEG2MMX,PS2,MLib,ARM,Altivec"

    AVOPTION_CODEC_INT("mb_qmin", "minimum MB quantizer",
                       mb_qmin, 0, 8, 0),
    AVOPTION_CODEC_INT("mb_qmax", "maximum MB quantizer",
                       mb_qmax, 0, 8, 0),

    AVOPTION_CODEC_INT("me_cmp", "ME compare function",
                       me_cmp, 0, 24000000, 0),
    AVOPTION_CODEC_INT("me_sub_cmp", "subpixel ME compare function",
                       me_sub_cmp, 0, 24000000, 0),

    AVOPTION_CODEC_INT("dia_size", "ME diamond size & shape",
                       dia_size, 0, 24000000, 0),
    AVOPTION_CODEC_INT("last_predictor_count", "amount of previous MV predictors",
                       last_predictor_count, 0, 24000000, 0),

    AVOPTION_CODEC_INT("pre_me", "pre pass for ME",
                       pre_me, 0, 24000000, 0),
    AVOPTION_CODEC_INT("me_pre_cmp", "ME pre pass compare function",
                       me_pre_cmp, 0, 24000000, 0),

    AVOPTION_CODEC_INT("me_range", "maximum ME search range",
                       me_range, 0, 24000000, 0),
    AVOPTION_CODEC_INT("pre_dia_size", "ME pre pass diamond size & shape",
                       pre_dia_size, 0, 24000000, 0),
    AVOPTION_CODEC_INT("me_subpel_quality", "subpel ME quality",
                       me_subpel_quality, 0, 24000000, 0),
    AVOPTION_CODEC_FLAG("psnr", "calculate PSNR of compressed frames",
                        flags, CODEC_FLAG_PSNR, 0),
    AVOPTION_CODEC_RCOVERRIDE("rc_override", "ratecontrol override (=startframe,endframe,qscale,quality_factor)",
                              rc_override),
    AVOPTION_SUB(avoptions_common),
    AVOPTION_END()
};

#ifdef CONFIG_ENCODERS
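
/* encoder declarations: all of these encoders share the generic
   MPV_encode_init/MPV_encode_picture/MPV_encode_end entry points and differ
   only in codec id and name. */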
AVCodec mpeg1video_encoder = {
    "mpeg1video",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG1VIDEO,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

#ifdef CONFIG_RISKY

AVCodec mpeg2video_encoder = {
    "mpeg2video",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG2VIDEO,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec h263_encoder = {
    "h263",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec h263p_encoder = {
    "h263p",
    CODEC_TYPE_VIDEO,
    CODEC_ID_H263P,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec flv_encoder = {
    "flv",
    CODEC_TYPE_VIDEO,
    CODEC_ID_FLV1,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec rv10_encoder = {
    "rv10",
    CODEC_TYPE_VIDEO,
    CODEC_ID_RV10,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,
};

AVCodec mpeg4_encoder = {
    "mpeg4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(MpegEncContext),
    MPV_encode_init,
    MPV_encode_picture,
    MPV_encode_end,