Statistics
| Branch: | Revision:

ffmpeg / libavcodec / mpegvideo.c @ 8ed2ae09

History | View | Annotate | Download (92.4 KB)

1
/*
2
 * The simplest mpeg encoder (well, it was the simplest!)
3
 * Copyright (c) 2000,2001 Fabrice Bellard
4
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5
 *
6
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7
 *
8
 * This file is part of Libav.
9
 *
10
 * Libav is free software; you can redistribute it and/or
11
 * modify it under the terms of the GNU Lesser General Public
12
 * License as published by the Free Software Foundation; either
13
 * version 2.1 of the License, or (at your option) any later version.
14
 *
15
 * Libav is distributed in the hope that it will be useful,
16
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18
 * Lesser General Public License for more details.
19
 *
20
 * You should have received a copy of the GNU Lesser General Public
21
 * License along with Libav; if not, write to the Free Software
22
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23
 */
24

    
25
/**
26
 * @file
27
 * The simplest mpeg encoder (well, it was the simplest!).
28
 */
29

    
30
#include "libavutil/intmath.h"
31
#include "libavutil/imgutils.h"
32
#include "avcodec.h"
33
#include "dsputil.h"
34
#include "internal.h"
35
#include "mpegvideo.h"
36
#include "mpegvideo_common.h"
37
#include "mjpegenc.h"
38
#include "msmpeg4.h"
39
#include "faandct.h"
40
#include "xvmc_internal.h"
41
#include <limits.h>
42

    
43
//#undef NDEBUG
44
//#include <assert.h>
45

    
46
/* Forward declarations of the C reference inverse-quantization routines.
 * ff_dct_common_init() installs these as function pointers on the context;
 * architecture-specific init may later override them. */
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
                                   DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
                                  DCTELEM *block, int n, int qscale);
60

    
61

    
62
/* enable all paranoid tests for rounding, overflows, etc... */
63
//#define PARANOID
64

    
65
//#define DEBUG
66

    
67

    
68
/* Default chroma qscale mapping: identity (chroma qscale == luma qscale). */
static const uint8_t ff_default_chroma_qscale_table[32]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
};
72

    
73
/* MPEG-1 DC scale: constant 8 for every qscale value. */
const uint8_t ff_mpeg1_dc_scale_table[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
};
80

    
81
/* MPEG-2 DC scale, intra_dc_precision == 1: constant 4 for every qscale. */
static const uint8_t mpeg2_dc_scale_table1[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
88

    
89
/* MPEG-2 DC scale, intra_dc_precision == 2: constant 2 for every qscale. */
static const uint8_t mpeg2_dc_scale_table2[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
    2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
};
96

    
97
/* MPEG-2 DC scale, intra_dc_precision == 3: constant 1 (no DC scaling). */
static const uint8_t mpeg2_dc_scale_table3[128]={
//  0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
};
104

    
105
/* DC scale table selected by intra_dc_precision (0..3); index 0 reuses the
 * MPEG-1 table (constant 8). */
const uint8_t * const ff_mpeg2_dc_scale_table[4]={
    ff_mpeg1_dc_scale_table,
    mpeg2_dc_scale_table1,
    mpeg2_dc_scale_table2,
    mpeg2_dc_scale_table3,
};
111

    
112
/* PIX_FMT_NONE-terminated list of supported software pixel formats. */
const enum PixelFormat ff_pixfmt_list_420[] = {
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
116

    
117
/* PIX_FMT_NONE-terminated list including hardware-accelerated formats;
 * hwaccel entries come first so they are preferred when available. */
const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
    PIX_FMT_DXVA2_VLD,
    PIX_FMT_VAAPI_VLD,
    PIX_FMT_YUV420P,
    PIX_FMT_NONE
};
123

    
124
/**
 * Scan [p, end) for a 00 00 01 start-code prefix.
 * @param state carry-over of the last bytes seen, so prefixes straddling a
 *              buffer boundary are still found across calls
 * @return pointer just past the byte following the 00 00 01 prefix, or end
 *         if no start code was found
 */
const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
    int i;

    assert(p<=end);
    if(p>=end)
        return end;

    /* Feed the first 3 bytes through *state one at a time so that a start
     * code split across the previous call's buffer is detected. */
    for(i=0; i<3; i++){
        uint32_t tmp= *state << 8;
        *state= tmp + *(p++);
        if(tmp == 0x100 || p==end)   // tmp==0x100 means the previous 3 bytes were 00 00 01
            return p;
    }

    /* Fast scan: examine p[-1..-3] and step 1-3 bytes depending on which
     * positions can still be part of a 00 00 01 prefix. */
    while(p<end){
        if     (p[-1] > 1      ) p+= 3;   // p[-1] can be neither 00 nor 01 of a prefix
        else if(p[-2]          ) p+= 2;   // p[-2] nonzero: prefix can't end before p+2
        else if(p[-3]|(p[-1]-1)) p++;     // not exactly 00 00 01 yet
        else{
            p++;                          // p[-3..-1] == 00 00 01: step past it
            break;
        }
    }

    /* Reload *state with the last 4 bytes consumed, for the next call. */
    p= FFMIN(p, end)-4;
    *state= AV_RB32(p);

    return p+4;
}
153

    
154
/* init common dct for both encoder and decoder */
/**
 * Install the inverse-quantization function pointers and the permuted
 * scantables on the context.  Shared by encoder and decoder.
 * @return 0 (always succeeds)
 */
av_cold int ff_dct_common_init(MpegEncContext *s)
{
    /* Start with the portable C implementations ... */
    s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
    s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
    s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
    s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
    s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
    if(s->flags & CODEC_FLAG_BITEXACT)
        s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
    s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;

    /* ... then let the architecture-specific init override them. */
#if   HAVE_MMX
    MPV_common_init_mmx(s);
#elif ARCH_ALPHA
    MPV_common_init_axp(s);
#elif CONFIG_MLIB
    MPV_common_init_mlib(s);
#elif HAVE_MMI
    MPV_common_init_mmi(s);
#elif ARCH_ARM
    MPV_common_init_arm(s);
#elif HAVE_ALTIVEC
    MPV_common_init_altivec(s);
#elif ARCH_BFIN
    MPV_common_init_bfin(s);
#endif

    /* load & permutate scantables
       note: only wmv uses different ones
    */
    if(s->alternate_scan){
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_alternate_vertical_scan);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_alternate_vertical_scan);
    }else{
        ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable  , ff_zigzag_direct);
        ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable  , ff_zigzag_direct);
    }
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
    ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);

    return 0;
}
197

    
198
/**
 * Shallow-copy src into dst and flag dst as a copy (FF_BUFFER_TYPE_COPY),
 * so the duplicate is distinguishable from the buffer's owner.
 */
void ff_copy_picture(Picture *dst, Picture *src){
    *dst = *src;
    dst->type= FF_BUFFER_TYPE_COPY;
}
202

    
203
/**
 * Release a frame buffer back to the application via release_buffer()
 * and free any hwaccel private data attached to the picture.
 */
static void free_frame_buffer(MpegEncContext *s, Picture *pic)
{
    s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
    av_freep(&pic->hwaccel_picture_private);
}
211

    
212
/**
 * Allocate a frame buffer through the application's get_buffer() callback,
 * plus hwaccel private data when a hwaccel is in use.
 * Validates what the callback returned (planes present, strides stable and
 * chroma strides equal) and releases the buffer again on any failure.
 * @return 0 on success, -1 on failure
 */
static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
{
    int r;

    if (s->avctx->hwaccel) {
        assert(!pic->hwaccel_picture_private);
        if (s->avctx->hwaccel->priv_data_size) {
            pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
            if (!pic->hwaccel_picture_private) {
                av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
                return -1;
            }
        }
    }

    r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);

    /* Sanity-check the callback's result before trusting it. */
    if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
        av_freep(&pic->hwaccel_picture_private);
        return -1;
    }

    /* Strides must not change between frames once established. */
    if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    /* Both chroma planes must share one stride. */
    if (pic->linesize[1] != pic->linesize[2]) {
        av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
        free_frame_buffer(s, pic);
        return -1;
    }

    return 0;
}
252

    
253
/**
254
 * allocates a Picture
255
 * The pixels are allocated/set by calling get_buffer() if shared=0
256
 */
257
int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
258
    const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
259
    const int mb_array_size= s->mb_stride*s->mb_height;
260
    const int b8_array_size= s->b8_stride*s->mb_height*2;
261
    const int b4_array_size= s->b4_stride*s->mb_height*4;
262
    int i;
263
    int r= -1;
264

    
265
    if(shared){
266
        assert(pic->data[0]);
267
        assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268
        pic->type= FF_BUFFER_TYPE_SHARED;
269
    }else{
270
        assert(!pic->data[0]);
271

    
272
        if (alloc_frame_buffer(s, pic) < 0)
273
            return -1;
274

    
275
        s->linesize  = pic->linesize[0];
276
        s->uvlinesize= pic->linesize[1];
277
    }
278

    
279
    if(pic->qscale_table==NULL){
280
        if (s->encoding) {
281
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var   , mb_array_size * sizeof(int16_t)  , fail)
282
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t)  , fail)
283
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean  , mb_array_size * sizeof(int8_t )  , fail)
284
        }
285

    
286
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
287
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t)  , fail)
288
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
289
        pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
290
        if(s->out_format == FMT_H264){
291
            for(i=0; i<2; i++){
292
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4)  * sizeof(int16_t), fail)
293
                pic->motion_val[i]= pic->motion_val_base[i]+4;
294
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
295
            }
296
            pic->motion_subsample_log2= 2;
297
        }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
298
            for(i=0; i<2; i++){
299
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
300
                pic->motion_val[i]= pic->motion_val_base[i]+4;
301
                FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
302
            }
303
            pic->motion_subsample_log2= 3;
304
        }
305
        if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
306
            FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
307
        }
308
        pic->qstride= s->mb_stride;
309
        FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
310
    }
311

    
312
    /* It might be nicer if the application would keep track of these
313
     * but it would require an API change. */
314
    memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
315
    s->prev_pict_types[0]= s->dropable ? FF_B_TYPE : s->pict_type;
316
    if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == FF_B_TYPE)
317
        pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
318

    
319
    return 0;
320
fail: //for the FF_ALLOCZ_OR_GOTO macro
321
    if(r>=0)
322
        free_frame_buffer(s, pic);
323
    return -1;
324
}
325

    
326
/**
 * Deallocate a Picture: release the frame buffer (unless the pixels are
 * externally owned, i.e. FF_BUFFER_TYPE_SHARED) and free all side tables.
 * Safe to call on a partially-allocated or already-freed Picture, since
 * av_freep() NULLs each pointer.
 */
static void free_picture(MpegEncContext *s, Picture *pic){
    int i;

    if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
        free_frame_buffer(s, pic);
    }

    av_freep(&pic->mb_var);
    av_freep(&pic->mc_mb_var);
    av_freep(&pic->mb_mean);
    av_freep(&pic->mbskip_table);
    av_freep(&pic->qscale_table);
    av_freep(&pic->mb_type_base);
    av_freep(&pic->dct_coeff);
    av_freep(&pic->pan_scan);
    pic->mb_type= NULL;   // pointed into mb_type_base, which is gone now
    for(i=0; i<2; i++){
        av_freep(&pic->motion_val_base[i]);
        av_freep(&pic->ref_index[i]);
    }

    /* For shared pictures only the pointers are cleared; the pixel data
     * belongs to someone else. */
    if(pic->type == FF_BUFFER_TYPE_SHARED){
        for(i=0; i<4; i++){
            pic->base[i]=
            pic->data[i]= NULL;
        }
        pic->type= 0;
    }
}
358

    
359
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context: edge emulation buffer, motion-estimation scratchpads, DCT
 * blocks and (for H.263-family) the AC prediction values.
 * @return 0 on success, -1 on failure (partial allocations are released
 *         later through MPV_common_end())
 */
static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
    int y_size = s->b8_stride * (2 * s->mb_height + 1);
    int c_size = s->mb_stride * (s->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int i;

    // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
    FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
    s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;

     //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,  (s->width+64)*4*16*2*sizeof(uint8_t), fail)
    /* the scratchpads alias one allocation; obmc gets a 16-byte offset */
    s->me.temp=         s->me.scratchpad;
    s->rd_scratchpad=   s->me.scratchpad;
    s->b_scratchpad=    s->me.scratchpad;
    s->obmc_scratchpad= s->me.scratchpad + 16;
    if (s->encoding) {
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map      , ME_MAP_SIZE*sizeof(uint32_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
    s->block= s->blocks[0];

    for(i=0;i<12;i++){
        s->pblocks[i] = &s->block[i];
    }

    if (s->out_format == FMT_H263) {
        /* ac values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
        s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
        s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
        s->ac_val[2] = s->ac_val[1] + c_size;
    }

    return 0;
fail:
    return -1; //free() through MPV_common_end()
}
401

    
402
/**
 * Free the per-thread scratch buffers allocated by
 * init_duplicate_context() and clear every pointer that aliased them.
 * A NULL context is a no-op.
 */
static void free_duplicate_context(MpegEncContext *s){
    if (!s)
        return;

    av_freep(&s->allocated_edge_emu_buffer);
    s->edge_emu_buffer = NULL;     // pointed into allocated_edge_emu_buffer

    /* temp/rd/b/obmc all aliased me.scratchpad — clear them too */
    av_freep(&s->me.scratchpad);
    s->me.temp         = NULL;
    s->rd_scratchpad   = NULL;
    s->b_scratchpad    = NULL;
    s->obmc_scratchpad = NULL;

    av_freep(&s->dct_error_sum);
    av_freep(&s->me.map);
    av_freep(&s->me.score_map);
    av_freep(&s->blocks);
    av_freep(&s->ac_val_base);
    s->block = NULL;               // pointed into blocks[0]
}
419

    
420
/**
 * Copy into bak exactly the fields that are private to a duplicated
 * context (scratch buffers, per-slice state).  Used by
 * ff_update_duplicate_context() to preserve them across a wholesale
 * memcpy of the context.
 */
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
#define COPY(a) bak->a= src->a
    COPY(allocated_edge_emu_buffer);
    COPY(edge_emu_buffer);
    COPY(me.scratchpad);
    COPY(me.temp);
    COPY(rd_scratchpad);
    COPY(b_scratchpad);
    COPY(obmc_scratchpad);
    COPY(me.map);
    COPY(me.score_map);
    COPY(blocks);
    COPY(block);
    COPY(start_mb_y);
    COPY(end_mb_y);
    COPY(me.map_generation);
    COPY(pb);
    COPY(dct_error_sum);
    COPY(dct_count[0]);
    COPY(dct_count[1]);
    COPY(ac_val_base);
    COPY(ac_val[0]);
    COPY(ac_val[1]);
    COPY(ac_val[2]);
#undef COPY
}
446

    
447
/**
 * Synchronize a duplicated thread context with src while keeping dst's
 * own scratch buffers and per-slice state intact.
 */
void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
    MpegEncContext backup;
    int i;

    //FIXME copy only needed parts
    backup_duplicate_context(&backup, dst);   /* save dst's private fields  */
    memcpy(dst, src, sizeof(MpegEncContext)); /* clone src wholesale        */
    backup_duplicate_context(dst, &backup);   /* restore dst's private part */

    /* pblocks must point into dst's own block array, not src's */
    for (i = 0; i < 12; i++)
        dst->pblocks[i] = &dst->block[i];
}
460

    
461
/**
462
 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
463
 * the changed fields will not depend upon the prior state of the MpegEncContext.
464
 */
465
void MPV_common_defaults(MpegEncContext *s){
466
    s->y_dc_scale_table=
467
    s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
468
    s->chroma_qscale_table= ff_default_chroma_qscale_table;
469
    s->progressive_frame= 1;
470
    s->progressive_sequence= 1;
471
    s->picture_structure= PICT_FRAME;
472

    
473
    s->coded_picture_number = 0;
474
    s->picture_number = 0;
475
    s->input_picture_number = 0;
476

    
477
    s->picture_in_gop_number = 0;
478

    
479
    s->f_code = 1;
480
    s->b_code = 1;
481
}
482

    
483
/**
 * Set the given MpegEncContext to defaults for decoding.
 * The changed fields do not depend on the prior state of the context.
 * Currently identical to the common defaults; exists as a separate entry
 * point so decoder-only defaults can be added without touching encoders.
 */
void MPV_decode_defaults(MpegEncContext *s){
    MPV_common_defaults(s);
}
490

    
491
/**
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 * @return 0 on success, -1 on failure (everything allocated so far is
 *         released through MPV_common_end())
 */
av_cold int MPV_common_init(MpegEncContext *s)
{
    int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;

    /* interlaced MPEG-2 needs mb_height rounded to a pair of field MB rows;
     * H.264 manages its own mb_height */
    if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
        s->mb_height = (s->height + 31) / 32 * 2;
    else if (s->codec_id != CODEC_ID_H264)
        s->mb_height = (s->height + 15) / 16;

    if(s->avctx->pix_fmt == PIX_FMT_NONE){
        av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
        return -1;
    }

    /* slice threading cannot use more threads than MB rows */
    if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
        av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
        return -1;

    dsputil_init(&s->dsp, s->avctx);
    ff_dct_common_init(s);

    s->flags= s->avctx->flags;
    s->flags2= s->avctx->flags2;

    /* derive macroblock / 8x8-block / 4x4-block grid dimensions;
     * strides include +1 padding column */
    s->mb_width  = (s->width  + 15) / 16;
    s->mb_stride = s->mb_width + 1;
    s->b8_stride = s->mb_width*2 + 1;
    s->b4_stride = s->mb_width*4 + 1;
    mb_array_size= s->mb_height * s->mb_stride;
    mv_table_size= (s->mb_height+2) * s->mb_stride + 1;

    /* set chroma shifts */
    avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
                                                    &(s->chroma_y_shift) );

    /* set default edge pos, will be overriden in decode_header if needed */
    s->h_edge_pos= s->mb_width*16;
    s->v_edge_pos= s->mb_height*16;

    s->mb_num = s->mb_width * s->mb_height;

    /* per-plane row strides for block-level addressing (Y at 8x8, C at MB) */
    s->block_wrap[0]=
    s->block_wrap[1]=
    s->block_wrap[2]=
    s->block_wrap[3]= s->b8_stride;
    s->block_wrap[4]=
    s->block_wrap[5]= s->mb_stride;

    y_size = s->b8_stride * (2 * s->mb_height + 1);
    c_size = s->mb_stride * (s->mb_height + 1);
    yc_size = y_size + 2 * c_size;

    /* convert fourcc to upper case */
    s->codec_tag = ff_toupper4(s->avctx->codec_tag);

    s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);

    s->avctx->coded_frame= (AVFrame*)&s->current_picture;

    FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
    for(y=0; y<s->mb_height; y++){
        for(x=0; x<s->mb_width; x++){
            s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
        }
    }
    s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?

    if (s->encoding) {
        /* Allocate MV tables */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base            , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base       , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base     , mv_table_size * 2 * sizeof(int16_t), fail)
        /* usable pointers skip the padding row/column of the *_base arrays */
        s->p_mv_table           = s->p_mv_table_base            + s->mb_stride + 1;
        s->b_forw_mv_table      = s->b_forw_mv_table_base       + s->mb_stride + 1;
        s->b_back_mv_table      = s->b_back_mv_table_base       + s->mb_stride + 1;
        s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
        s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
        s->b_direct_mv_table    = s->b_direct_mv_table_base     + s->mb_stride + 1;

        if(s->msmpeg4_version){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
        }
        FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);

        /* Allocate MB type table */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type  , mb_array_size * sizeof(uint16_t), fail) //needed for encoding

        FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)

        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix  , 64*32   * sizeof(int), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)

        if(s->avctx->noise_reduction){
            FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
        }
    }
    FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
    for(i = 0; i < MAX_PICTURE_COUNT; i++) {
        avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
    }

    FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)

    if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
        /* interlaced direct mode decoding tables */
            for(i=0; i<2; i++){
                int j, k;
                for(j=0; j<2; j++){
                    for(k=0; k<2; k++){
                        FF_ALLOCZ_OR_GOTO(s->avctx,    s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
                        s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
                    }
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
                    FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
                    s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
                }
                FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
            }
    }
    if (s->out_format == FMT_H263) {
        /* cbp values */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
        s->coded_block= s->coded_block_base + s->b8_stride + 1;

        /* cbp, ac_pred, pred_dir */
        FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table     , mb_array_size * sizeof(uint8_t), fail)
        FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
    }

    if (s->h263_pred || s->h263_plus || !s->encoding) {
        /* dc values */
        //MN: we need these for error resilience of intra-frames
        FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
        s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
        s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
        s->dc_val[2] = s->dc_val[1] + c_size;
        for(i=0;i<yc_size;i++)
            s->dc_val_base[i] = 1024;   // neutral DC predictor value
    }

    /* which mb is a intra block */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
    memset(s->mbintra_table, 1, mb_array_size);

    /* init macroblock skip table */
    FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
    //Note the +1 is for a quicker mpeg4 slice_end detection
    FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);

    s->parse_context.state= -1;
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
       s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
       s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
    }

    s->context_initialized = 1;

    /* thread context 0 is the main context itself; the others are full
     * copies that get their own scratch buffers below */
    s->thread_context[0]= s;
    threads = s->avctx->thread_count;

    for(i=1; i<threads; i++){
        s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
        memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
    }

    for(i=0; i<threads; i++){
        if(init_duplicate_context(s->thread_context[i], s) < 0)
           goto fail;
        /* divide the MB rows between the threads, rounding to nearest */
        s->thread_context[i]->start_mb_y= (s->mb_height*(i  ) + s->avctx->thread_count/2) / s->avctx->thread_count;
        s->thread_context[i]->end_mb_y  = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
    }

    return 0;
 fail:
    MPV_common_end(s);
    return -1;
}
684

    
685
/* free common structure for both encoder and decoder; counterpart of
 * MPV_common_init() and safe on a partially-initialized context since
 * av_freep() NULLs each pointer it frees */
void MPV_common_end(MpegEncContext *s)
{
    int i, j, k;

    /* free the per-thread scratch buffers; context 0 is s itself, so only
     * contexts 1..n-1 are themselves freed */
    for(i=0; i<s->avctx->thread_count; i++){
        free_duplicate_context(s->thread_context[i]);
    }
    for(i=1; i<s->avctx->thread_count; i++){
        av_freep(&s->thread_context[i]);
    }

    av_freep(&s->parse_context.buffer);
    s->parse_context.buffer_size=0;

    /* encoder MV tables; the non-base pointers pointed into the base
     * arrays and must be cleared by hand */
    av_freep(&s->mb_type);
    av_freep(&s->p_mv_table_base);
    av_freep(&s->b_forw_mv_table_base);
    av_freep(&s->b_back_mv_table_base);
    av_freep(&s->b_bidir_forw_mv_table_base);
    av_freep(&s->b_bidir_back_mv_table_base);
    av_freep(&s->b_direct_mv_table_base);
    s->p_mv_table= NULL;
    s->b_forw_mv_table= NULL;
    s->b_back_mv_table= NULL;
    s->b_bidir_forw_mv_table= NULL;
    s->b_bidir_back_mv_table= NULL;
    s->b_direct_mv_table= NULL;
    for(i=0; i<2; i++){
        for(j=0; j<2; j++){
            for(k=0; k<2; k++){
                av_freep(&s->b_field_mv_table_base[i][j][k]);
                s->b_field_mv_table[i][j][k]=NULL;
            }
            av_freep(&s->b_field_select_table[i][j]);
            av_freep(&s->p_field_mv_table_base[i][j]);
            s->p_field_mv_table[i][j]=NULL;
        }
        av_freep(&s->p_field_select_table[i]);
    }

    av_freep(&s->dc_val_base);
    av_freep(&s->coded_block_base);
    av_freep(&s->mbintra_table);
    av_freep(&s->cbp_table);
    av_freep(&s->pred_dir_table);

    av_freep(&s->mbskip_table);
    av_freep(&s->prev_pict_types);
    av_freep(&s->bitstream_buffer);
    s->allocated_bitstream_buffer_size=0;

    av_freep(&s->avctx->stats_out);
    av_freep(&s->ac_stats);
    av_freep(&s->error_status_table);
    av_freep(&s->mb_index2xy);
    av_freep(&s->lambda_table);
    av_freep(&s->q_intra_matrix);
    av_freep(&s->q_inter_matrix);
    av_freep(&s->q_intra_matrix16);
    av_freep(&s->q_inter_matrix16);
    av_freep(&s->input_picture);
    av_freep(&s->reordered_input_picture);
    av_freep(&s->dct_offset);

    /* release each picture's side tables and frame buffer, then the array */
    if(s->picture){
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            free_picture(s, &s->picture[i]);
        }
    }
    av_freep(&s->picture);
    s->context_initialized = 0;
    s->last_picture_ptr=
    s->next_picture_ptr=
    s->current_picture_ptr= NULL;   // these pointed into s->picture[]
    s->linesize= s->uvlinesize= 0;

    for(i=0; i<3; i++)
        av_freep(&s->visualization_buffer[i]);

    avcodec_default_free_buffers(s->avctx);
}
767

    
768
/**
 * Build the run/level lookup arrays (max_level[], max_run[], index_run[])
 * of an RLTable from its table_run[]/table_level[] entries.
 *
 * The arrays are built twice: once for the "not last" codes
 * ([0, rl->last)) and once for the "last" codes ([rl->last, rl->n)).
 *
 * @param rl           table to initialize
 * @param static_store if non-NULL, backing storage for the three arrays
 *                     (one row per "last" value); otherwise each array is
 *                     allocated with av_malloc(). NOTE(review): the
 *                     av_malloc() results are used unchecked — presumably
 *                     acceptable for these tiny init-time allocations.
 */
void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
{
    int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
    uint8_t index_run[MAX_RUN+1];
    int last, run, level, start, end, i;

    /* If table is static, we can quit if rl->max_level[0] is not NULL */
    if(static_store && rl->max_level[0])
        return;

    /* compute max_level[], max_run[] and index_run[] */
    for(last=0;last<2;last++) {
        if (last == 0) {
            start = 0;
            end = rl->last;
        } else {
            start = rl->last;
            end = rl->n;
        }

        /* index_run entries left at rl->n mean "no code with this run" */
        memset(max_level, 0, MAX_RUN + 1);
        memset(max_run, 0, MAX_LEVEL + 1);
        memset(index_run, rl->n, MAX_RUN + 1);
        for(i=start;i<end;i++) {
            run = rl->table_run[i];
            level = rl->table_level[i];
            if (index_run[run] == rl->n)
                index_run[run] = i;
            if (level > max_level[run])
                max_level[run] = level;
            if (run > max_run[level])
                max_run[level] = run;
        }
        /* static_store[last] is partitioned as:
           [0, MAX_RUN]                          -> max_level
           [MAX_RUN+1, MAX_RUN+MAX_LEVEL+1]      -> max_run
           [MAX_RUN+MAX_LEVEL+2, ...]            -> index_run */
        if(static_store)
            rl->max_level[last] = static_store[last];
        else
            rl->max_level[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
        if(static_store)
            rl->max_run[last] = static_store[last] + MAX_RUN + 1;
        else
            rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
        memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
        if(static_store)
            rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
        else
            rl->index_run[last] = av_malloc(MAX_RUN + 1);
        memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
    }
}
818

    
819
/**
 * Fill rl->rl_vlc[q] for every qscale q in [0, 32): a VLC table whose
 * entries carry the already-dequantized level (level * qmul + qadd) and
 * the run, so the decoder can skip a separate dequantization step.
 */
void init_vlc_rl(RLTable *rl)
{
    int i, q;

    for(q=0; q<32; q++){
        int qmul= q*2;
        int qadd= (q-1)|1;

        /* q == 0 means "no dequantization": identity multiplier, no offset */
        if(q==0){
            qmul=1;
            qadd=0;
        }
        for(i=0; i<rl->vlc.table_size; i++){
            int code= rl->vlc.table[i][0];
            int len = rl->vlc.table[i][1];
            int level, run;

            if(len==0){ // illegal code
                run= 66;
                level= MAX_LEVEL;
            }else if(len<0){ //more bits needed
                run= 0;
                level= code;
            }else{
                if(code==rl->n){ //esc
                    run= 66;
                    level= 0;
                }else{
                    run=   rl->table_run  [code] + 1;
                    level= rl->table_level[code] * qmul + qadd;
                    /* codes at or past rl->last are "last" codes; the
                       run offset of 192 encodes that in the run field */
                    if(code >= rl->last) run+=192;
                }
            }
            rl->rl_vlc[q][i].len= len;
            rl->rl_vlc[q][i].level= level;
            rl->rl_vlc[q][i].run= run;
        }
    }
}
858

    
859
/**
 * Find a free slot in s->picture[].
 *
 * @param shared non-zero to search for a slot suitable for a shared
 *               (externally owned) buffer: empty AND type == 0.
 *               Otherwise prefer an empty slot with type != 0, falling
 *               back to any empty slot.
 * @return index of the found slot; never returns on overflow (aborts).
 */
int ff_find_unused_picture(MpegEncContext *s, int shared){
    int idx;

    if(shared){
        for(idx=0; idx<MAX_PICTURE_COUNT; idx++){
            Picture *p= &s->picture[idx];
            if(p->data[0]==NULL && p->type==0)
                return idx;
        }
    }else{
        /* first pass: prefer previously used slots (type != 0) */
        for(idx=0; idx<MAX_PICTURE_COUNT; idx++){
            Picture *p= &s->picture[idx];
            if(p->data[0]==NULL && p->type!=0)
                return idx; //FIXME
        }
        /* second pass: settle for any empty slot */
        for(idx=0; idx<MAX_PICTURE_COUNT; idx++){
            if(s->picture[idx].data[0]==NULL)
                return idx;
        }
    }

    av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
    /* Returning -1 would only delay the crash: the codec would draw into a
     * non-existing frame anyway, so aborting here is safer. An encoder must
     * only allocate as much as the specification allows, and a decoder has
     * to validate the stream and drop/replace excess or missing reference
     * frames itself — MAX_PICTURE_COUNT is always large enough for valid
     * streams, so reaching this point is a codec bug, not an OOM condition.
     */
    abort();
    return -1;
}
890

    
891
/**
 * Recompute the per-coefficient noise-reduction offsets (s->dct_offset)
 * from the accumulated DCT error statistics, separately for the intra
 * and inter cases. Statistics are halved once the sample count exceeds
 * 2^16 so they stay bounded while tracking recent behaviour.
 */
static void update_noise_reduction(MpegEncContext *s){
    int idx, coef;

    for(idx=0; idx<2; idx++){
        /* keep the running sums from growing without bound */
        if(s->dct_count[idx] > (1<<16)){
            for(coef=0; coef<64; coef++)
                s->dct_error_sum[idx][coef] >>= 1;
            s->dct_count[idx] >>= 1;
        }

        /* offset = round(noise_reduction * count / error_sum) per coef */
        for(coef=0; coef<64; coef++){
            s->dct_offset[idx][coef]=
                (s->avctx->noise_reduction * s->dct_count[idx]
                 + s->dct_error_sum[idx][coef]/2)
                / (s->dct_error_sum[idx][coef] + 1);
        }
    }
}
907

    
908
/**
 * generic function for encode/decode called after coding/decoding the header
 * and before a frame is coded/decoded.
 *
 * Releases reference frames that are no longer needed, selects or allocates
 * the current picture, updates the last/next picture pointers, allocates
 * dummy frames for missing references, and picks the dequantizer functions.
 *
 * @return 0 on success, -1 if a picture could not be allocated
 */
int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
{
    int i;
    Picture *pic;
    s->mb_skipped = 0;

    assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);

    /* mark&release old frames */
    if (s->pict_type != FF_B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
      if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
          free_frame_buffer(s, s->last_picture_ptr);

        /* release forgotten pictures */
        /* if(mpeg124/h263) */
        if(!s->encoding){
            for(i=0; i<MAX_PICTURE_COUNT; i++){
                /* a referenced picture with data that is neither last nor
                   next should not exist at this point — reclaim it */
                if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
                    av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
                    free_frame_buffer(s, &s->picture[i]);
                }
            }
        }
      }
    }

    if(!s->encoding){
        /* release non reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                free_frame_buffer(s, &s->picture[i]);
            }
        }

        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
            pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
        else{
            i= ff_find_unused_picture(s, 0);
            pic= &s->picture[i];
        }

        /* reference == 0 for dropable or B frames; otherwise the picture
           structure (H.264) or 3 (both fields) marks it as a reference */
        pic->reference= 0;
        if (!s->dropable){
            if (s->codec_id == CODEC_ID_H264)
                pic->reference = s->picture_structure;
            else if (s->pict_type != FF_B_TYPE)
                pic->reference = 3;
        }

        pic->coded_picture_number= s->coded_picture_number++;

        if(ff_alloc_picture(s, pic, 0) < 0)
            return -1;

        s->current_picture_ptr= pic;
        //FIXME use only the vars from current_pic
        s->current_picture_ptr->top_field_first= s->top_field_first;
        if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
            if(s->picture_structure != PICT_FRAME)
                s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
        }
        s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
    }

    s->current_picture_ptr->pict_type= s->pict_type;
//    if(s->flags && CODEC_FLAG_QSCALE)
  //      s->current_picture_ptr->quality= s->new_picture_ptr->quality;
    s->current_picture_ptr->key_frame= s->pict_type == FF_I_TYPE;

    ff_copy_picture(&s->current_picture, s->current_picture_ptr);

    /* non-B frames shift the reference window: last <- next <- current */
    if (s->pict_type != FF_B_TYPE) {
        s->last_picture_ptr= s->next_picture_ptr;
        if(!s->dropable)
            s->next_picture_ptr= s->current_picture_ptr;
    }
/*    av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
        s->last_picture_ptr    ? s->last_picture_ptr->data[0] : NULL,
        s->next_picture_ptr    ? s->next_picture_ptr->data[0] : NULL,
        s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
        s->pict_type, s->dropable);*/

    if(s->codec_id != CODEC_ID_H264){
        /* a non-I frame without a usable last picture (e.g. stream starts
           on a P frame): substitute a freshly allocated dummy frame */
        if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) && s->pict_type!=FF_I_TYPE){
            av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->last_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
                return -1;
        }
        if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==FF_B_TYPE){
            /* Allocate a dummy frame */
            i= ff_find_unused_picture(s, 0);
            s->next_picture_ptr= &s->picture[i];
            if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
                return -1;
        }
    }

    if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
    if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);

    assert(s->pict_type == FF_I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0]));

    /* field pictures: address a single field by doubling the linesizes and,
       for the bottom field, starting one line down */
    if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
        int i;
        for(i=0; i<4; i++){
            if(s->picture_structure == PICT_BOTTOM_FIELD){
                 s->current_picture.data[i] += s->current_picture.linesize[i];
            }
            s->current_picture.linesize[i] *= 2;
            s->last_picture.linesize[i] *=2;
            s->next_picture.linesize[i] *=2;
        }
    }

#if FF_API_HURRY_UP
    s->hurry_up= s->avctx->hurry_up;
#endif
    s->error_recognition= avctx->error_recognition;

    /* set dequantizer, we can't do it during init as it might change for mpeg4
       and we can't do it in the header decode as init is not called for mpeg4 there yet */
    if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
    }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
    }else{
        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
    }

    if(s->dct_error_sum){
        assert(s->avctx->noise_reduction && s->encoding);

        update_noise_reduction(s);
    }

    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
        return ff_xvmc_field_start(s, avctx);

    return 0;
}
1057

    
1058
/* generic function for encode/decode called after a frame has been coded/decoded.
 * Pads the edges of the reconstructed picture (needed for unrestricted motion
 * vectors), updates last-frame bookkeeping, and releases non-reference
 * frames when encoding. */
void MPV_frame_end(MpegEncContext *s)
{
    int i;
    /* draw edge for correct motion prediction if outside */
    //just to make sure that all data is rendered.
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_field_end(s);
    }else if(!s->avctx->hwaccel
       && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
       && s->unrestricted_mv
       && s->current_picture.reference
       && !s->intra_only
       && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
            /* luma at full resolution, chroma at half (4:2:0 layout) */
            s->dsp.draw_edges(s->current_picture.data[0], s->linesize  ,
                              s->h_edge_pos   , s->v_edge_pos   ,
                              EDGE_WIDTH  , EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
                              s->h_edge_pos>>1, s->v_edge_pos>>1,
                              EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
            s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
                              s->h_edge_pos>>1, s->v_edge_pos>>1,
                              EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
    }
    emms_c();

    s->last_pict_type    = s->pict_type;
    s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
    if(s->pict_type!=FF_B_TYPE){
        s->last_non_b_pict_type= s->pict_type;
    }
#if 0
        /* copy back current_picture variables */
    for(i=0; i<MAX_PICTURE_COUNT; i++){
        if(s->picture[i].data[0] == s->current_picture.data[0]){
            s->picture[i]= s->current_picture;
            break;
        }
    }
    assert(i<MAX_PICTURE_COUNT);
#endif

    if(s->encoding){
        /* release non-reference frames */
        for(i=0; i<MAX_PICTURE_COUNT; i++){
            if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
                free_frame_buffer(s, &s->picture[i]);
            }
        }
    }
    // clear copies, to avoid confusion
#if 0
    memset(&s->last_picture, 0, sizeof(Picture));
    memset(&s->next_picture, 0, sizeof(Picture));
    memset(&s->current_picture, 0, sizeof(Picture));
#endif
    s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
}
1116

    
1117
/**
1118
 * draws an line from (ex, ey) -> (sx, sy).
1119
 * @param w width of the image
1120
 * @param h height of the image
1121
 * @param stride stride/linesize of the image
1122
 * @param color color of the arrow
1123
 */
1124
static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1125
    int x, y, fr, f;
1126

    
1127
    sx= av_clip(sx, 0, w-1);
1128
    sy= av_clip(sy, 0, h-1);
1129
    ex= av_clip(ex, 0, w-1);
1130
    ey= av_clip(ey, 0, h-1);
1131

    
1132
    buf[sy*stride + sx]+= color;
1133

    
1134
    if(FFABS(ex - sx) > FFABS(ey - sy)){
1135
        if(sx > ex){
1136
            FFSWAP(int, sx, ex);
1137
            FFSWAP(int, sy, ey);
1138
        }
1139
        buf+= sx + sy*stride;
1140
        ex-= sx;
1141
        f= ((ey-sy)<<16)/ex;
1142
        for(x= 0; x <= ex; x++){
1143
            y = (x*f)>>16;
1144
            fr= (x*f)&0xFFFF;
1145
            buf[ y   *stride + x]+= (color*(0x10000-fr))>>16;
1146
            buf[(y+1)*stride + x]+= (color*         fr )>>16;
1147
        }
1148
    }else{
1149
        if(sy > ey){
1150
            FFSWAP(int, sx, ex);
1151
            FFSWAP(int, sy, ey);
1152
        }
1153
        buf+= sx + sy*stride;
1154
        ey-= sy;
1155
        if(ey) f= ((ex-sx)<<16)/ey;
1156
        else   f= 0;
1157
        for(y= 0; y <= ey; y++){
1158
            x = (y*f)>>16;
1159
            fr= (y*f)&0xFFFF;
1160
            buf[y*stride + x  ]+= (color*(0x10000-fr))>>16;
1161
            buf[y*stride + x+1]+= (color*         fr )>>16;
1162
        }
1163
    }
1164
}
1165

    
1166
/**
1167
 * draws an arrow from (ex, ey) -> (sx, sy).
1168
 * @param w width of the image
1169
 * @param h height of the image
1170
 * @param stride stride/linesize of the image
1171
 * @param color color of the arrow
1172
 */
1173
static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1174
    int dx,dy;
1175

    
1176
    sx= av_clip(sx, -100, w+100);
1177
    sy= av_clip(sy, -100, h+100);
1178
    ex= av_clip(ex, -100, w+100);
1179
    ey= av_clip(ey, -100, h+100);
1180

    
1181
    dx= ex - sx;
1182
    dy= ey - sy;
1183

    
1184
    if(dx*dx + dy*dy > 3*3){
1185
        int rx=  dx + dy;
1186
        int ry= -dx + dy;
1187
        int length= ff_sqrt((rx*rx + ry*ry)<<8);
1188

    
1189
        //FIXME subpixel accuracy
1190
        rx= ROUNDED_DIV(rx*3<<4, length);
1191
        ry= ROUNDED_DIV(ry*3<<4, length);
1192

    
1193
        draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1194
        draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1195
    }
1196
    draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1197
}
1198

    
1199
/**
 * Print debugging info for the given picture.
 *
 * Depending on s->avctx->debug / debug_mv this emits a textual per-MB
 * dump (skip counts, qscale, MB type characters) via av_log, and/or
 * draws motion vectors, QP and MB-type overlays directly into a copy
 * of the frame (s->visualization_buffer).
 */
void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){

    if(s->avctx->hwaccel || !pict || !pict->mb_type) return;

    /* ---- textual per-macroblock dump ---- */
    if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
        int x,y;

        av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
        switch (pict->pict_type) {
            case FF_I_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
            case FF_P_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
            case FF_B_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
            case FF_S_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
            case FF_SI_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
            case FF_SP_TYPE: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
        }
        for(y=0; y<s->mb_height; y++){
            for(x=0; x<s->mb_width; x++){
                if(s->avctx->debug&FF_DEBUG_SKIP){
                    int count= s->mbskip_table[x + y*s->mb_stride];
                    if(count>9) count=9;
                    av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
                }
                if(s->avctx->debug&FF_DEBUG_QP){
                    av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
                }
                if(s->avctx->debug&FF_DEBUG_MB_TYPE){
                    int mb_type= pict->mb_type[x + y*s->mb_stride];
                    //Type & MV direction
                    if(IS_PCM(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "P");
                    else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "A");
                    else if(IS_INTRA4x4(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "i");
                    else if(IS_INTRA16x16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "I");
                    else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "d");
                    else if(IS_DIRECT(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "D");
                    else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "g");
                    else if(IS_GMC(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "G");
                    else if(IS_SKIP(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "S");
                    else if(!USES_LIST(mb_type, 1))
                        av_log(s->avctx, AV_LOG_DEBUG, ">");
                    else if(!USES_LIST(mb_type, 0))
                        av_log(s->avctx, AV_LOG_DEBUG, "<");
                    else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        av_log(s->avctx, AV_LOG_DEBUG, "X");
                    }

                    //segmentation
                    if(IS_8X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "+");
                    else if(IS_16X8(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "-");
                    else if(IS_8X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "|");
                    else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, "?");


                    if(IS_INTERLACED(mb_type))
                        av_log(s->avctx, AV_LOG_DEBUG, "=");
                    else
                        av_log(s->avctx, AV_LOG_DEBUG, " ");
                }
//                av_log(s->avctx, AV_LOG_DEBUG, " ");
            }
            av_log(s->avctx, AV_LOG_DEBUG, "\n");
        }
    }

    /* ---- visual overlays drawn into a copy of the frame ---- */
    if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
        const int shift= 1 + s->quarter_sample;
        int mb_y;
        uint8_t *ptr;
        int i;
        int h_chroma_shift, v_chroma_shift, block_height;
        const int width = s->avctx->width;
        const int height= s->avctx->height;
        const int mv_sample_log2= 4 - pict->motion_subsample_log2;
        const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
        s->low_delay=0; //needed to see the vectors without trashing the buffers

        /* work on a copy so the decoded picture itself stays untouched */
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
        for(i=0; i<3; i++){
            memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
            pict->data[i]= s->visualization_buffer[i];
        }
        pict->type= FF_BUFFER_TYPE_COPY;
        ptr= pict->data[0];
        block_height = 16>>v_chroma_shift;

        for(mb_y=0; mb_y<s->mb_height; mb_y++){
            int mb_x;
            for(mb_x=0; mb_x<s->mb_width; mb_x++){
                const int mb_index= mb_x + mb_y*s->mb_stride;
                /* motion vector arrows: type 0 = P forward,
                   1 = B forward, 2 = B backward */
                if((s->avctx->debug_mv) && pict->motion_val){
                  int type;
                  for(type=0; type<3; type++){
                    int direction = 0;
                    switch (type) {
                      case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=FF_P_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 0;
                              break;
                      case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=FF_B_TYPE))
                                continue;
                              direction = 1;
                              break;
                    }
                    if(!USES_LIST(pict->mb_type[mb_index], direction))
                        continue;

                    /* one arrow per partition (4x 8x8, 2x 16x8, 2x 8x16
                       or a single 16x16), from block center to target */
                    if(IS_8X8(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<4; i++){
                        int sx= mb_x*16 + 4 + 8*(i&1);
                        int sy= mb_y*16 + 4 + 8*(i>>1);
                        int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                        int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                        int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                        draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                      }
                    }else if(IS_16X8(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<2; i++){
                        int sx=mb_x*16 + 8;
                        int sy=mb_y*16 + 4 + 8*i;
                        int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
                        int mx=(pict->motion_val[direction][xy][0]>>shift);
                        int my=(pict->motion_val[direction][xy][1]>>shift);

                        if(IS_INTERLACED(pict->mb_type[mb_index]))
                            my*=2;

                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                      }
                    }else if(IS_8X16(pict->mb_type[mb_index])){
                      int i;
                      for(i=0; i<2; i++){
                        int sx=mb_x*16 + 4 + 8*i;
                        int sy=mb_y*16 + 8;
                        int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
                        int mx=(pict->motion_val[direction][xy][0]>>shift);
                        int my=(pict->motion_val[direction][xy][1]>>shift);

                        if(IS_INTERLACED(pict->mb_type[mb_index]))
                            my*=2;

                        draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
                      }
                    }else{
                      int sx= mb_x*16 + 8;
                      int sy= mb_y*16 + 8;
                      int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
                      int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
                      int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
                      draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
                    }
                  }
                }
                /* QP visualization: paint the chroma planes with a grey
                   level proportional to the macroblock qscale */
                if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
                    uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
                    int y;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
                    }
                }
                /* MB-type visualization: color-code the chroma planes by
                   macroblock type and mark partition boundaries in luma */
                if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
                    int mb_type= pict->mb_type[mb_index];
                    uint64_t u,v;
                    int y;
#define COLOR(theta, r)\
u= (int)(128 + r*cos(theta*3.141592/180));\
v= (int)(128 + r*sin(theta*3.141592/180));


                    u=v=128;
                    if(IS_PCM(mb_type)){
                        COLOR(120,48)
                    }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
                        COLOR(30,48)
                    }else if(IS_INTRA4x4(mb_type)){
                        COLOR(90,48)
                    }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
//                        COLOR(120,48)
                    }else if(IS_DIRECT(mb_type)){
                        COLOR(150,48)
                    }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
                        COLOR(170,48)
                    }else if(IS_GMC(mb_type)){
                        COLOR(190,48)
                    }else if(IS_SKIP(mb_type)){
//                        COLOR(180,48)
                    }else if(!USES_LIST(mb_type, 1)){
                        COLOR(240,48)
                    }else if(!USES_LIST(mb_type, 0)){
                        COLOR(0,48)
                    }else{
                        assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
                        COLOR(300,48)
                    }

                    u*= 0x0101010101010101ULL;
                    v*= 0x0101010101010101ULL;
                    for(y=0; y<block_height; y++){
                        *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
                        *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
                    }

                    //segmentation
                    if(IS_8X8(mb_type) || IS_16X8(mb_type)){
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                        *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
                    }
                    if(IS_8X8(mb_type) || IS_8X16(mb_type)){
                        for(y=0; y<16; y++)
                            pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
                    }
                    if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
                        int dm= 1 << (mv_sample_log2-2);
                        for(i=0; i<4; i++){
                            int sx= mb_x*16 + 8*(i&1);
                            int sy= mb_y*16 + 8*(i>>1);
                            int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
                            //FIXME bidir
                            int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
                            if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
                                for(y=0; y<8; y++)
                                    pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
                            if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
                                *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
                        }
                    }

                    if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
                        // hmm
                    }
                }
                s->mbskip_table[mb_index]=0;
            }
        }
    }
}
1460

    
1461
/**
 * Half-pel motion compensation of one block at reduced (lowres) resolution.
 *
 * The motion vector is split into an integer sample offset (used to locate
 * the source block) and a sub-pel fraction (handed to the chroma-style MC
 * routine pix_op). When the referenced block would read outside the
 * picture, the interpolation source is replaced by a padded copy in the
 * edge-emulation buffer.
 *
 * @return 1 if the edge-emulation buffer was used, 0 otherwise
 */
static inline int hpel_motion_lowres(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, h264_chroma_mc_func *pix_op,
                                  int motion_x, int motion_y)
{
    const int lowres   = s->avctx->lowres;
    const int op_index = FFMIN(lowres, 2);
    const int s_mask   = (2 << lowres) - 1;
    int used_emu = 0;
    int frac_x, frac_y;

    /* qpel vectors are treated with half-pel precision in lowres mode */
    if (s->quarter_sample) {
        motion_x /= 2;
        motion_y /= 2;
    }

    /* split the vector into a sub-pel fraction and an integer offset */
    frac_x = motion_x & s_mask;
    frac_y = motion_y & s_mask;
    src_x += motion_x >> (lowres + 1);
    src_y += motion_y >> (lowres + 1);

    src += src_y * stride + src_x;

    if (   (unsigned)src_x > h_edge_pos                  - (!!frac_x) - w
        || (unsigned)src_y > (v_edge_pos >> field_based) - (!!frac_y) - h) {
        /* block sticks out of the picture: interpolate from a padded copy */
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize,
                                w + 1, (h + 1) << field_based,
                                src_x, src_y << field_based,
                                h_edge_pos, v_edge_pos);
        src      = s->edge_emu_buffer;
        used_emu = 1;
    }

    /* rescale the sub-pel fraction to the range expected by pix_op */
    frac_x = (frac_x << 2) >> lowres;
    frac_y = (frac_y << 2) >> lowres;
    if (field_select)
        src += s->linesize;
    pix_op[op_index](dest, src, stride, h, frac_x, frac_y);
    return used_emu;
}
/**
 * Apply one MPEG motion vector to all three components (Y, Cb, Cr) at
 * reduced (lowres) resolution.
 *
 * @param field_based   nonzero for field-based prediction (doubles linesize)
 * @param bottom_field  nonzero when writing the bottom field of a frame pic
 * @param field_select  which field of the reference to predict from
 * @param ref_picture   array[3] of pointers to the reference planes
 * @param pix_op        chroma-style MC functions, indexed by log2 block size
 * @param h             block height in lowres luma samples
 * @param mb_y          macroblock row (may differ from s->mb_y for fields)
 */
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
                               int motion_x, int motion_y, int h, int mb_y)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
    const int lowres= s->avctx->lowres;
    const int op_index= FFMIN(lowres, 2);
    const int block_s= 8>>lowres;
    const int s_mask= (2<<lowres)-1;
    const int h_edge_pos = s->h_edge_pos >> lowres;
    const int v_edge_pos = s->v_edge_pos >> lowres;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
        motion_x/=2;
        motion_y/=2;
    }

    if(field_based){
        /* compensate for the vertical offset between the two fields */
        motion_y += (bottom_field - field_select)*((1<<lowres)-1);
    }

    /* split the luma vector into sub-pel fraction and integer offset */
    sx= motion_x & s_mask;
    sy= motion_y & s_mask;
    src_x = s->mb_x*2*block_s               + (motion_x >> (lowres+1));
    src_y =(   mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));

    /* derive the chroma source position; the rule depends on the codec family */
    if (s->out_format == FMT_H263) {
        uvsx = ((motion_x>>1) & s_mask) | (sx&1);
        uvsy = ((motion_y>>1) & s_mask) | (sy&1);
        uvsrc_x = src_x>>1;
        uvsrc_y = src_y>>1;
    }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvsx = (2*mx) & s_mask;
        uvsy = (2*my) & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> lowres);
        uvsrc_y =    mb_y*block_s               + (my >> lowres);
    } else {
        mx = motion_x / 2;
        my = motion_y / 2;
        uvsx = mx & s_mask;
        uvsy = my & s_mask;
        uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
        uvsrc_y =(   mb_y*block_s>>field_based) + (my >> (lowres+1));
    }

    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    /* if any plane would be read outside the picture, interpolate from
       padded copies in the edge-emulation buffer instead */
    if(   (unsigned)src_x > h_edge_pos                 - (!!sx) - 2*block_s
       || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
                             src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                s->dsp.emulated_edge_mc(uvbuf  , ptr_cb, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
                                 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    /* rescale the sub-pel fractions to the range expected by pix_op;
       NOTE: this must happen after the edge test above, which relies on
       the un-scaled (!!sx)/(!!sy) values */
    sx= (sx << 2) >> lowres;
    sy= (sy << 2) >> lowres;
    /* luma block is twice the chroma size, hence the lowres-1 table index
       (this path is only reached with lowres >= 1) */
    pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        uvsx= (uvsx << 2) >> lowres;
        uvsy= (uvsy << 2) >> lowres;
        pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
        pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
    }
    //FIXME h261 lowres loop filter
}
static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1603
                                     uint8_t *dest_cb, uint8_t *dest_cr,
1604
                                     uint8_t **ref_picture,
1605
                                     h264_chroma_mc_func *pix_op,
1606
                                     int mx, int my){
1607
    const int lowres= s->avctx->lowres;
1608
    const int op_index= FFMIN(lowres, 2);
1609
    const int block_s= 8>>lowres;
1610
    const int s_mask= (2<<lowres)-1;
1611
    const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1612
    const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1613
    int emu=0, src_x, src_y, offset, sx, sy;
1614
    uint8_t *ptr;
1615

    
1616
    if(s->quarter_sample){
1617
        mx/=2;
1618
        my/=2;
1619
    }
1620

    
1621
    /* In case of 8X8, we construct a single chroma motion vector
1622
       with a special rounding */
1623
    mx= ff_h263_round_chroma(mx);
1624
    my= ff_h263_round_chroma(my);
1625

    
1626
    sx= mx & s_mask;
1627
    sy= my & s_mask;
1628
    src_x = s->mb_x*block_s + (mx >> (lowres+1));
1629
    src_y = s->mb_y*block_s + (my >> (lowres+1));
1630

    
1631
    offset = src_y * s->uvlinesize + src_x;
1632
    ptr = ref_picture[1] + offset;
1633
    if(s->flags&CODEC_FLAG_EMU_EDGE){
1634
        if(   (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1635
           || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1636
            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1637
            ptr= s->edge_emu_buffer;
1638
            emu=1;
1639
        }
1640
    }
1641
    sx= (sx << 2) >> lowres;
1642
    sy= (sy << 2) >> lowres;
1643
    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
1644

    
1645
    ptr = ref_picture[2] + offset;
1646
    if(emu){
1647
        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1648
        ptr= s->edge_emu_buffer;
1649
    }
1650
    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1651
}
1652

    
1653
/**
 * motion compensation of a single macroblock at reduced (lowres) resolution
 * @param s context
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static inline void MPV_motion_lowres(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                              int dir, uint8_t **ref_picture,
                              h264_chroma_mc_func *pix_op)
{
    int mx, my;
    int mb_x, mb_y, i;
    const int lowres= s->avctx->lowres;
    const int block_s= 8>>lowres;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                    0, 0, 0,
                    ref_picture, pix_op,
                    s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
        break;
    case MV_TYPE_8X8:
        /* four independent luma vectors; chroma gets one combined vector */
        mx = 0;
        my = 0;
            for(i=0;i<4;i++) {
                hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
                            ref_picture[0], 0, 0,
                            (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
                            s->width, s->height, s->linesize,
                            s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
                            block_s, block_s, pix_op,
                            s->mv[dir][i][0], s->mv[dir][i][1]);

                /* accumulate the luma vectors for the combined chroma vector */
                mx += s->mv[dir][i][0];
                my += s->mv[dir][i][1];
            }

        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            /* top field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
            /* bottom field */
            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        1, 1, s->field_select[dir][1],
                        ref_picture, pix_op,
                        s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
        } else {
            /* field picture: predicting the opposite-parity field of the
               frame being decoded uses the current picture as reference */
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
        }
        break;
    case MV_TYPE_16X8:
        /* two vectors, each covering the upper/lower half of the MB */
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);

            dest_y += 2*block_s*s->linesize;
            dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        /* dual-prime: predictions from both parities are averaged */
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
                }
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);

                // after put we make avg of the same block
                pix_op = s->dsp.avg_h264_chroma_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
    break;
    default: assert(0);
    }
}
/* put block[] to dest[] */
1780
static inline void put_dct(MpegEncContext *s,
1781
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1782
{
1783
    s->dct_unquantize_intra(s, block, i, qscale);
1784
    s->dsp.idct_put (dest, line_size, block);
1785
}
1786

    
1787
/* add block[] to dest[] */
1788
static inline void add_dct(MpegEncContext *s,
1789
                           DCTELEM *block, int i, uint8_t *dest, int line_size)
1790
{
1791
    if (s->block_last_index[i] >= 0) {
1792
        s->dsp.idct_add (dest, line_size, block);
1793
    }
1794
}
1795

    
1796
static inline void add_dequant_dct(MpegEncContext *s,
1797
                           DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1798
{
1799
    if (s->block_last_index[i] >= 0) {
1800
        s->dct_unquantize_inter(s, block, i, qscale);
1801

    
1802
        s->dsp.idct_add (dest, line_size, block);
1803
    }
1804
}
1805

    
1806
/**
1807
 * cleans dc, ac, coded_block for the current non intra MB
1808
 */
1809
void ff_clean_intra_table_entries(MpegEncContext *s)
1810
{
1811
    int wrap = s->b8_stride;
1812
    int xy = s->block_index[0];
1813

    
1814
    s->dc_val[0][xy           ] =
1815
    s->dc_val[0][xy + 1       ] =
1816
    s->dc_val[0][xy     + wrap] =
1817
    s->dc_val[0][xy + 1 + wrap] = 1024;
1818
    /* ac pred */
1819
    memset(s->ac_val[0][xy       ], 0, 32 * sizeof(int16_t));
1820
    memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1821
    if (s->msmpeg4_version>=3) {
1822
        s->coded_block[xy           ] =
1823
        s->coded_block[xy + 1       ] =
1824
        s->coded_block[xy     + wrap] =
1825
        s->coded_block[xy + 1 + wrap] = 0;
1826
    }
1827
    /* chroma */
1828
    wrap = s->mb_stride;
1829
    xy = s->mb_x + s->mb_y * wrap;
1830
    s->dc_val[1][xy] =
1831
    s->dc_val[2][xy] = 1024;
1832
    /* ac pred */
1833
    memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1834
    memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1835

    
1836
    s->mbintra_table[xy]= 0;
1837
}
1838

    
1839
/* generic function called after a macroblock has been parsed by the
   decoder or after it has been encoded by the encoder.

   Important variables used:
   s->mb_intra : true if intra macroblock
   s->mv_dir   : motion vector direction
   s->mv_type  : motion vector type
   s->mv       : motion vector
   s->interlaced_dct : true if interlaced dct used (mpeg2)

   lowres_flag and is_mpeg12 are compile-time constants at each call site
   (see MPV_decode_mb) so the always-inline expansion specializes this
   function per codec family. */
static av_always_inline
void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                            int lowres_flag, int is_mpeg12)
{
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
        ff_xvmc_decode_mb(s);//xvmc uses pblocks
        return;
    }

    if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
       /* save DCT coefficients */
       int i,j;
       DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
       for(i=0; i<6; i++)
           for(j=0; j<64; j++)
               *dct++ = block[i][s->dsp.idct_permutation[j]];
    }

    s->current_picture.qscale_table[mb_xy]= s->qscale;

    /* update DC predictors for P macroblocks */
    if (!s->mb_intra) {
        if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
            if(s->mbintra_table[mb_xy])
                ff_clean_intra_table_entries(s);
        } else {
            s->last_dc[0] =
            s->last_dc[1] =
            s->last_dc[2] = 128 << s->intra_dc_precision;
        }
    }
    else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
        s->mbintra_table[mb_xy]=1;

    /* reconstruct the macroblock unless we are encoding and the pixels
       are not needed (no PSNR, no RD decisions, intra-only/B frame) */
    if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==FF_B_TYPE) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
        uint8_t *dest_y, *dest_cb, *dest_cr;
        int dct_linesize, dct_offset;
        op_pixels_func (*op_pix)[4];
        qpel_mc_func (*op_qpix)[16];
        const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
        const int uvlinesize= s->current_picture.linesize[1];
        const int readable= s->pict_type != FF_B_TYPE || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;

        /* avoid copy if macroblock skipped in last frame too */
        /* skip only during decoding as we might trash the buffers during encoding a bit */
        if(!s->encoding){
            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
            const int age= s->current_picture.age;

            assert(age);

            if (s->mb_skipped) {
                s->mb_skipped= 0;
                assert(s->pict_type!=FF_I_TYPE);

                (*mbskip_ptr) ++; /* indicate that this time we skipped it */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;

                /* if previous was skipped too, then nothing to do !  */
                if (*mbskip_ptr >= age && s->current_picture.reference){
                    return;
                }
            } else if(!s->current_picture.reference){
                (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
                if(*mbskip_ptr >99) *mbskip_ptr= 99;
            } else{
                *mbskip_ptr = 0; /* not skipped */
            }
        }

        /* interlaced DCT interleaves the two fields line by line */
        dct_linesize = linesize << s->interlaced_dct;
        dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;

        if(readable){
            dest_y=  s->dest[0];
            dest_cb= s->dest[1];
            dest_cr= s->dest[2];
        }else{
            /* reconstruct into scratch and copy out at the end */
            dest_y = s->b_scratchpad;
            dest_cb= s->b_scratchpad+16*linesize;
            dest_cr= s->b_scratchpad+32*linesize;
        }

        if (!s->mb_intra) {
            /* motion handling */
            /* decoding or more than one mb_type (MC was already done otherwise) */
            if(!s->encoding){
                if(lowres_flag){
                    h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;

                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
                        /* bidirectional: average the backward prediction on top */
                        op_pix = s->dsp.avg_h264_chroma_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
                    }
                }else{
                    op_qpix= s->me.qpel_put;
                    if ((!s->no_rounding) || s->pict_type==FF_B_TYPE){
                        op_pix = s->dsp.put_pixels_tab;
                    }else{
                        op_pix = s->dsp.put_no_rnd_pixels_tab;
                    }
                    if (s->mv_dir & MV_DIR_FORWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
                        /* bidirectional: average the backward prediction on top */
                        op_pix = s->dsp.avg_pixels_tab;
                        op_qpix= s->me.qpel_avg;
                    }
                    if (s->mv_dir & MV_DIR_BACKWARD) {
                        MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
                    }
                }
            }

            /* skip dequant / idct if we are really late ;) */
#if FF_API_HURRY_UP
            if(s->hurry_up>1) goto skip_idct;
#endif
            if(s->avctx->skip_idct){
                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == FF_B_TYPE)
                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != FF_I_TYPE)
                   || s->avctx->skip_idct >= AVDISCARD_ALL)
                    goto skip_idct;
            }

            /* add dct residue */
            if(s->encoding || !(   s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                                || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                /* coefficients still quantized: dequantize while adding */
                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if (s->chroma_y_shift){
                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_linesize >>= 1;
                        dct_offset >>=1;
                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                /* coefficients already dequantized during parsing */
                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){//Chroma420
                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
                    }else{
                        //chroma422
                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                        if(!s->chroma_x_shift){//Chroma444
                            add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
                            add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
                            add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
                            add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
                        }
                    }
                }//fi gray
            }
            else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
            }
        } else {
            /* dct only in intra block */
            if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
                    }else{
                        dct_offset >>=1;
                        dct_linesize >>=1;
                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
                    }
                }
            }else{
                /* intra block, already dequantized: plain IDCT */
                s->dsp.idct_put(dest_y                          , dct_linesize, block[0]);
                s->dsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
                s->dsp.idct_put(dest_y + dct_offset             , dct_linesize, block[2]);
                s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);

                if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                    if(s->chroma_y_shift){
                        s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
                        s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
                    }else{

                        dct_linesize = uvlinesize << s->interlaced_dct;
                        dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;

                        s->dsp.idct_put(dest_cb,              dct_linesize, block[4]);
                        s->dsp.idct_put(dest_cr,              dct_linesize, block[5]);
                        s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                        s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                        if(!s->chroma_x_shift){//Chroma444
                            s->dsp.idct_put(dest_cb + 8,              dct_linesize, block[8]);
                            s->dsp.idct_put(dest_cr + 8,              dct_linesize, block[9]);
                            s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
                            s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
                        }
                    }
                }//gray
            }
        }
skip_idct:
        /* copy scratch reconstruction into the real destination */
        if(!readable){
            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
        }
    }
}
/* Dispatch to the appropriate MPV_decode_mb_internal specialization.
   The flag arguments are kept as literal constants so the always-inline
   body is specialized per codec family / lowres mode. */
void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
#if !CONFIG_SMALL
    if (s->out_format == FMT_MPEG1) {
        if (s->avctx->lowres)
            MPV_decode_mb_internal(s, block, 1, 1);
        else
            MPV_decode_mb_internal(s, block, 0, 1);
        return;
    }
#endif
    if (s->avctx->lowres)
        MPV_decode_mb_internal(s, block, 1, 0);
    else
        MPV_decode_mb_internal(s, block, 0, 0);
}
/**
2100
 *
2101
 * @param h is the normal height, this will be reduced automatically if needed for the last row
2102
 */
2103
void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2104
    if (s->avctx->draw_horiz_band) {
2105
        AVFrame *src;
2106
        const int field_pic= s->picture_structure != PICT_FRAME;
2107
        int offset[4];
2108

    
2109
        h= FFMIN(h, (s->avctx->height>>field_pic) - y);
2110

    
2111
        if(field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2112
            h <<= 1;
2113
            y <<= 1;
2114
            if(s->first_field) return;
2115
        }
2116

    
2117
        if(s->pict_type==FF_B_TYPE || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2118
            src= (AVFrame*)s->current_picture_ptr;
2119
        else if(s->last_picture_ptr)
2120
            src= (AVFrame*)s->last_picture_ptr;
2121
        else
2122
            return;
2123

    
2124
        if(s->pict_type==FF_B_TYPE && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
2125
            offset[0]=
2126
            offset[1]=
2127
            offset[2]=
2128
            offset[3]= 0;
2129
        }else{
2130
            offset[0]= y * s->linesize;
2131
            offset[1]=
2132
            offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2133
            offset[3]= 0;
2134
        }
2135

    
2136
        emms_c();
2137

    
2138
        s->avctx->draw_horiz_band(s->avctx, src, offset,
2139
                                  y, s->picture_structure, h);
2140
    }
2141
}
2142

    
2143
/**
 * Initialize s->block_index[] and the destination pointers s->dest[]
 * for the current macroblock position (s->mb_x, s->mb_y).
 */
void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
    /* take strides from the picture itself: s->linesize would be wrong
     * for field pictures */
    const int linesize   = s->current_picture.linesize[0];
    const int uvlinesize = s->current_picture.linesize[1];
    const int mb_size    = 4 - s->avctx->lowres;

    s->block_index[0] = s->b8_stride * (s->mb_y * 2    ) - 2 + s->mb_x * 2;
    s->block_index[1] = s->b8_stride * (s->mb_y * 2    ) - 1 + s->mb_x * 2;
    s->block_index[2] = s->b8_stride * (s->mb_y * 2 + 1) - 2 + s->mb_x * 2;
    s->block_index[3] = s->b8_stride * (s->mb_y * 2 + 1) - 1 + s->mb_x * 2;
    s->block_index[4] = s->mb_stride * (s->mb_y + 1)                + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    s->block_index[5] = s->mb_stride * (s->mb_y + s->mb_height + 2) + s->b8_stride * s->mb_height * 2 + s->mb_x - 1;
    //block_index is not used by mpeg2, so it is not affected by chroma_format

    s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) <<  mb_size);
    s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
    s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));

    if (!(s->pict_type == FF_B_TYPE && s->avctx->draw_horiz_band &&
          s->picture_structure == PICT_FRAME)) {
        if (s->picture_structure == PICT_FRAME) {
            s->dest[0] += s->mb_y *   linesize <<  mb_size;
            s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
        } else {
            /* field picture: rows advance at half the macroblock rate */
            s->dest[0] += (s->mb_y >> 1) *   linesize <<  mb_size;
            s->dest[1] += (s->mb_y >> 1) * uvlinesize << (mb_size - s->chroma_y_shift);
            s->dest[2] += (s->mb_y >> 1) * uvlinesize << (mb_size - s->chroma_y_shift);
            assert((s->mb_y & 1) == (s->picture_structure == PICT_BOTTOM_FIELD));
        }
    }
}
/**
 * Flush all decoder state: release every held frame buffer and reset
 * the bitstream parser, so decoding can restart cleanly (e.g. on seek).
 */
void ff_mpeg_flush(AVCodecContext *avctx){
    MpegEncContext *s = avctx->priv_data;
    int i;

    if (s == NULL || s->picture == NULL)
        return;

    /* release every internally or user allocated frame buffer */
    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
        Picture *pic = &s->picture[i];
        if (pic->data[0] && (pic->type == FF_BUFFER_TYPE_INTERNAL ||
                             pic->type == FF_BUFFER_TYPE_USER))
            free_frame_buffer(s, pic);
    }
    s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;

    s->mb_x = s->mb_y = 0;
    s->closed_gop = 0;

    /* reset the bitstream parser state */
    s->parse_context.state             = -1;
    s->parse_context.frame_start_found =  0;
    s->parse_context.overread          =  0;
    s->parse_context.overread_index    =  0;
    s->parse_context.index             =  0;
    s->parse_context.last_index        =  0;
    s->bitstream_buffer_size           =  0;
    s->pp_time                         =  0;
}
2201

    
2202
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2203
                                   DCTELEM *block, int n, int qscale)
2204
{
2205
    int i, level, nCoeffs;
2206
    const uint16_t *quant_matrix;
2207

    
2208
    nCoeffs= s->block_last_index[n];
2209

    
2210
    if (n < 4)
2211
        block[0] = block[0] * s->y_dc_scale;
2212
    else
2213
        block[0] = block[0] * s->c_dc_scale;
2214
    /* XXX: only mpeg1 */
2215
    quant_matrix = s->intra_matrix;
2216
    for(i=1;i<=nCoeffs;i++) {
2217
        int j= s->intra_scantable.permutated[i];
2218
        level = block[j];
2219
        if (level) {
2220
            if (level < 0) {
2221
                level = -level;
2222
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2223
                level = (level - 1) | 1;
2224
                level = -level;
2225
            } else {
2226
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2227
                level = (level - 1) | 1;
2228
            }
2229
            block[j] = level;
2230
        }
2231
    }
2232
}
2233

    
2234
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2235
                                   DCTELEM *block, int n, int qscale)
2236
{
2237
    int i, level, nCoeffs;
2238
    const uint16_t *quant_matrix;
2239

    
2240
    nCoeffs= s->block_last_index[n];
2241

    
2242
    quant_matrix = s->inter_matrix;
2243
    for(i=0; i<=nCoeffs; i++) {
2244
        int j= s->intra_scantable.permutated[i];
2245
        level = block[j];
2246
        if (level) {
2247
            if (level < 0) {
2248
                level = -level;
2249
                level = (((level << 1) + 1) * qscale *
2250
                         ((int) (quant_matrix[j]))) >> 4;
2251
                level = (level - 1) | 1;
2252
                level = -level;
2253
            } else {
2254
                level = (((level << 1) + 1) * qscale *
2255
                         ((int) (quant_matrix[j]))) >> 4;
2256
                level = (level - 1) | 1;
2257
            }
2258
            block[j] = level;
2259
        }
2260
    }
2261
}
2262

    
2263
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2264
                                   DCTELEM *block, int n, int qscale)
2265
{
2266
    int i, level, nCoeffs;
2267
    const uint16_t *quant_matrix;
2268

    
2269
    if(s->alternate_scan) nCoeffs= 63;
2270
    else nCoeffs= s->block_last_index[n];
2271

    
2272
    if (n < 4)
2273
        block[0] = block[0] * s->y_dc_scale;
2274
    else
2275
        block[0] = block[0] * s->c_dc_scale;
2276
    quant_matrix = s->intra_matrix;
2277
    for(i=1;i<=nCoeffs;i++) {
2278
        int j= s->intra_scantable.permutated[i];
2279
        level = block[j];
2280
        if (level) {
2281
            if (level < 0) {
2282
                level = -level;
2283
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2284
                level = -level;
2285
            } else {
2286
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2287
            }
2288
            block[j] = level;
2289
        }
2290
    }
2291
}
2292

    
2293
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2294
                                   DCTELEM *block, int n, int qscale)
2295
{
2296
    int i, level, nCoeffs;
2297
    const uint16_t *quant_matrix;
2298
    int sum=-1;
2299

    
2300
    if(s->alternate_scan) nCoeffs= 63;
2301
    else nCoeffs= s->block_last_index[n];
2302

    
2303
    if (n < 4)
2304
        block[0] = block[0] * s->y_dc_scale;
2305
    else
2306
        block[0] = block[0] * s->c_dc_scale;
2307
    quant_matrix = s->intra_matrix;
2308
    for(i=1;i<=nCoeffs;i++) {
2309
        int j= s->intra_scantable.permutated[i];
2310
        level = block[j];
2311
        if (level) {
2312
            if (level < 0) {
2313
                level = -level;
2314
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2315
                level = -level;
2316
            } else {
2317
                level = (int)(level * qscale * quant_matrix[j]) >> 3;
2318
            }
2319
            block[j] = level;
2320
            sum+=level;
2321
        }
2322
    }
2323
    block[63]^=sum&1;
2324
}
2325

    
2326
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2327
                                   DCTELEM *block, int n, int qscale)
2328
{
2329
    int i, level, nCoeffs;
2330
    const uint16_t *quant_matrix;
2331
    int sum=-1;
2332

    
2333
    if(s->alternate_scan) nCoeffs= 63;
2334
    else nCoeffs= s->block_last_index[n];
2335

    
2336
    quant_matrix = s->inter_matrix;
2337
    for(i=0; i<=nCoeffs; i++) {
2338
        int j= s->intra_scantable.permutated[i];
2339
        level = block[j];
2340
        if (level) {
2341
            if (level < 0) {
2342
                level = -level;
2343
                level = (((level << 1) + 1) * qscale *
2344
                         ((int) (quant_matrix[j]))) >> 4;
2345
                level = -level;
2346
            } else {
2347
                level = (((level << 1) + 1) * qscale *
2348
                         ((int) (quant_matrix[j]))) >> 4;
2349
            }
2350
            block[j] = level;
2351
            sum+=level;
2352
        }
2353
    }
2354
    block[63]^=sum&1;
2355
}
2356

    
2357
static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2358
                                  DCTELEM *block, int n, int qscale)
2359
{
2360
    int i, level, qmul, qadd;
2361
    int nCoeffs;
2362

    
2363
    assert(s->block_last_index[n]>=0);
2364

    
2365
    qmul = qscale << 1;
2366

    
2367
    if (!s->h263_aic) {
2368
        if (n < 4)
2369
            block[0] = block[0] * s->y_dc_scale;
2370
        else
2371
            block[0] = block[0] * s->c_dc_scale;
2372
        qadd = (qscale - 1) | 1;
2373
    }else{
2374
        qadd = 0;
2375
    }
2376
    if(s->ac_pred)
2377
        nCoeffs=63;
2378
    else
2379
        nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2380

    
2381
    for(i=1; i<=nCoeffs; i++) {
2382
        level = block[i];
2383
        if (level) {
2384
            if (level < 0) {
2385
                level = level * qmul - qadd;
2386
            } else {
2387
                level = level * qmul + qadd;
2388
            }
2389
            block[i] = level;
2390
        }
2391
    }
2392
}
2393

    
2394
static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2395
                                  DCTELEM *block, int n, int qscale)
2396
{
2397
    int i, level, qmul, qadd;
2398
    int nCoeffs;
2399

    
2400
    assert(s->block_last_index[n]>=0);
2401

    
2402
    qadd = (qscale - 1) | 1;
2403
    qmul = qscale << 1;
2404

    
2405
    nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2406

    
2407
    for(i=0; i<=nCoeffs; i++) {
2408
        level = block[i];
2409
        if (level) {
2410
            if (level < 0) {
2411
                level = level * qmul - qadd;
2412
            } else {
2413
                level = level * qmul + qadd;
2414
            }
2415
            block[i] = level;
2416
        }
2417
    }
2418
}
2419

    
2420
/**
2421
 * set qscale and update qscale dependent variables.
2422
 */
2423
void ff_set_qscale(MpegEncContext * s, int qscale)
2424
{
2425
    if (qscale < 1)
2426
        qscale = 1;
2427
    else if (qscale > 31)
2428
        qscale = 31;
2429

    
2430
    s->qscale = qscale;
2431
    s->chroma_qscale= s->chroma_qscale_table[qscale];
2432

    
2433
    s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2434
    s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
2435
}