ffmpeg / libavcodec / h263.c @ 2912e87a

/*
 * H263/MPEG4 backend for ffmpeg encoder and decoder
 * Copyright (c) 2000,2001 Fabrice Bellard
 * H263+ support.
 * Copyright (c) 2001 Juan J. Sierralta P
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * h263/mpeg4 codec.
 */

//#define DEBUG
#include <limits.h>

#include "dsputil.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "h263.h"
#include "h263data.h"
#include "mathops.h"
#include "unary.h"
#include "flv.h"
#include "mpeg4video.h"

//#undef NDEBUG
//#include <assert.h>

uint8_t ff_h263_static_rl_table_store[2][2][2*MAX_RUN + MAX_LEVEL + 3];

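/**
 * Update the current picture's per-macroblock motion information
 * (skip flag, per-8x8-block motion vectors and, when encoding, mb_type)
 * from the decoder/encoder state. For the 8x8 MV type the vectors have
 * already been stored during parsing and are left untouched here.
 */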
void ff_h263_update_motion_val(MpegEncContext * s){
    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
               //FIXME a lot of that is only needed for !low_delay
    const int wrap = s->b8_stride;
    const int xy = s->block_index[0];

    s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;

    if(s->mv_type != MV_TYPE_8X8){
        int motion_x, motion_y;
        if (s->mb_intra) {
            motion_x = 0;
            motion_y = 0;
        } else if (s->mv_type == MV_TYPE_16X16) {
            motion_x = s->mv[0][0][0];
            motion_y = s->mv[0][0][1];
        } else /*if (s->mv_type == MV_TYPE_FIELD)*/ {
            int i;
            motion_x = s->mv[0][0][0] + s->mv[0][1][0];
            motion_y = s->mv[0][0][1] + s->mv[0][1][1];
            motion_x = (motion_x>>1) | (motion_x&1);
            for(i=0; i<2; i++){
                s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
                s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
            }
            s->current_picture.ref_index[0][4*mb_xy    ]=
            s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
            s->current_picture.ref_index[0][4*mb_xy + 2]=
            s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
        }

        /* no update if 8X8 because it has been done during parsing */
        s->current_picture.motion_val[0][xy][0] = motion_x;
        s->current_picture.motion_val[0][xy][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1][1] = motion_y;
        s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
        s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
        s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
    }

    if(s->encoding){ //FIXME encoding MUST be cleaned up
        if (s->mv_type == MV_TYPE_8X8)
            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
        else if(s->mb_intra)
            s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
        else
            s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
    }
}

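/**
 * Predict the DC coefficient of block n from the left (A) and top (C)
 * neighbours. 1024 is used as the "no predictor available" reset value,
 * and prediction is not carried across GOB/slice boundaries. Returns the
 * predicted DC and stores in *dc_val_ptr a pointer to the slot where the
 * current block's DC has to be written back.
 */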
int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
{
    int x, y, wrap, a, c, pred_dc;
    int16_t *dc_val;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + ((n & 2) >> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
    }
    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }
    /* just DC prediction */
    if (a != 1024 && c != 1024)
        pred_dc = (a + c) >> 1;
    else if (a != 1024)
        pred_dc = a;
    else
        pred_dc = c;

    /* we assume pred is positive */
    *dc_val_ptr = &dc_val[x + y * wrap];
    return pred_dc;
}

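/**
 * Apply the H.263 deblocking (loop) filter. Because filtering lags the
 * decoding by one macroblock row/column, this call handles edges of the
 * current macroblock as well as edges belonging to its left, top and
 * top-left neighbours; a quantizer of 0 (used for skipped macroblocks)
 * disables the filter for the corresponding edges.
 */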
void ff_h263_loop_filter(MpegEncContext * s){
    int qp_c;
    const int linesize  = s->linesize;
    const int uvlinesize= s->uvlinesize;
    const int xy = s->mb_y * s->mb_stride + s->mb_x;
    uint8_t *dest_y = s->dest[0];
    uint8_t *dest_cb= s->dest[1];
    uint8_t *dest_cr= s->dest[2];

//    if(s->pict_type==FF_B_TYPE && !s->readable) return;

    /*
       Diag Top
       Left Center
    */
    if(!IS_SKIP(s->current_picture.mb_type[xy])){
        qp_c= s->qscale;
        s->dsp.h263_v_loop_filter(dest_y+8*linesize  , linesize, qp_c);
        s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
    }else
        qp_c= 0;

    if(s->mb_y){
        int qp_dt, qp_tt, qp_tc;

        if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
            qp_tt=0;
        else
            qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];

        if(qp_c)
            qp_tc= qp_c;
        else
            qp_tc= qp_tt;

        if(qp_tc){
            const int chroma_qp= s->chroma_qscale_table[qp_tc];
            s->dsp.h263_v_loop_filter(dest_y  ,   linesize, qp_tc);
            s->dsp.h263_v_loop_filter(dest_y+8,   linesize, qp_tc);

            s->dsp.h263_v_loop_filter(dest_cb , uvlinesize, chroma_qp);
            s->dsp.h263_v_loop_filter(dest_cr , uvlinesize, chroma_qp);
        }

        if(qp_tt)
            s->dsp.h263_h_loop_filter(dest_y-8*linesize+8  ,   linesize, qp_tt);

        if(s->mb_x){
            if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
                qp_dt= qp_tt;
            else
                qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];

            if(qp_dt){
                const int chroma_qp= s->chroma_qscale_table[qp_dt];
                s->dsp.h263_h_loop_filter(dest_y -8*linesize  ,   linesize, qp_dt);
                s->dsp.h263_h_loop_filter(dest_cb-8*uvlinesize, uvlinesize, chroma_qp);
                s->dsp.h263_h_loop_filter(dest_cr-8*uvlinesize, uvlinesize, chroma_qp);
            }
        }
    }

    if(qp_c){
        s->dsp.h263_h_loop_filter(dest_y +8,   linesize, qp_c);
        if(s->mb_y + 1 == s->mb_height)
            s->dsp.h263_h_loop_filter(dest_y+8*linesize+8,   linesize, qp_c);
    }

    if(s->mb_x){
        int qp_lc;
        if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
            qp_lc= qp_c;
        else
            qp_lc= s->current_picture.qscale_table[xy-1];

        if(qp_lc){
            s->dsp.h263_h_loop_filter(dest_y,   linesize, qp_lc);
            if(s->mb_y + 1 == s->mb_height){
                const int chroma_qp= s->chroma_qscale_table[qp_lc];
                s->dsp.h263_h_loop_filter(dest_y +8*  linesize,   linesize, qp_lc);
                s->dsp.h263_h_loop_filter(dest_cb             , uvlinesize, chroma_qp);
                s->dsp.h263_h_loop_filter(dest_cr             , uvlinesize, chroma_qp);
            }
        }
    }
}

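/**
 * AC/DC prediction for advanced intra coding: predict the DC coefficient
 * of block n and, when AC prediction is in use, also the first row or
 * column of AC coefficients from the left or top neighbour, then store
 * the current block's DC and first row/column back into the prediction
 * tables.
 */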
void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
{
    int x, y, wrap, a, c, pred_dc, scale, i;
    int16_t *dc_val, *ac_val, *ac_val1;

    /* find prediction */
    if (n < 4) {
        x = 2 * s->mb_x + (n & 1);
        y = 2 * s->mb_y + (n>> 1);
        wrap = s->b8_stride;
        dc_val = s->dc_val[0];
        ac_val = s->ac_val[0][0];
        scale = s->y_dc_scale;
    } else {
        x = s->mb_x;
        y = s->mb_y;
        wrap = s->mb_stride;
        dc_val = s->dc_val[n - 4 + 1];
        ac_val = s->ac_val[n - 4 + 1][0];
        scale = s->c_dc_scale;
    }

    ac_val += ((y) * wrap + (x)) * 16;
    ac_val1 = ac_val;

    /* B C
     * A X
     */
    a = dc_val[(x - 1) + (y) * wrap];
    c = dc_val[(x) + (y - 1) * wrap];

    /* No prediction outside GOB boundary */
    if(s->first_slice_line && n!=3){
        if(n!=2) c= 1024;
        if(n!=1 && s->mb_x == s->resync_mb_x) a= 1024;
    }

    if (s->ac_pred) {
        pred_dc = 1024;
        if (s->h263_aic_dir) {
            /* left prediction */
            if (a != 1024) {
                ac_val -= 16;
                for(i=1;i<8;i++) {
                    block[s->dsp.idct_permutation[i<<3]] += ac_val[i];
                }
                pred_dc = a;
            }
        } else {
            /* top prediction */
            if (c != 1024) {
                ac_val -= 16 * wrap;
                for(i=1;i<8;i++) {
                    block[s->dsp.idct_permutation[i   ]] += ac_val[i + 8];
                }
                pred_dc = c;
            }
        }
    } else {
        /* just DC prediction */
        if (a != 1024 && c != 1024)
            pred_dc = (a + c) >> 1;
        else if (a != 1024)
            pred_dc = a;
        else
            pred_dc = c;
    }

    /* we assume pred is positive */
    block[0]=block[0]*scale + pred_dc;

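    /* negative DC values are clamped to 0; all others are forced odd
       (oddification, as in H.263 dequantization, to limit IDCT mismatch) */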
    if (block[0] < 0)
        block[0] = 0;
    else
        block[0] |= 1;

    /* Update AC/DC tables */
    dc_val[(x) + (y) * wrap] = block[0];

    /* left copy */
    for(i=1;i<8;i++)
        ac_val1[i    ] = block[s->dsp.idct_permutation[i<<3]];
    /* top copy */
    for(i=1;i<8;i++)
        ac_val1[8 + i] = block[s->dsp.idct_permutation[i   ]];
}

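/**
 * Compute the H.263/MPEG-4 motion vector predictor for the given 8x8
 * block as the median of the left (A), top (B) and diagonal (C)
 * candidate vectors, handling the special cases on the first slice/GOB
 * line and at resync boundaries. The predictor is written to *px / *py;
 * the return value points at the current block's entry in
 * current_picture.motion_val, where the caller stores the final vector.
 */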
int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
                        int *px, int *py)
{
    int wrap;
    int16_t *A, *B, *C, (*mot_val)[2];
    static const int off[4]= {2, 1, 1, -1};

    wrap = s->b8_stride;
    mot_val = s->current_picture.motion_val[dir] + s->block_index[block];

    A = mot_val[ - 1];
    /* special case for first (slice) line */
    if (s->first_slice_line && block<3) {
        // we can't just change some MVs to simulate that as we need them for the B frames (and ME)
        // and if we ever support non rectangular objects then we need to do a few ifs here anyway :(
        if(block==0){ //most common case
            if(s->mb_x  == s->resync_mb_x){ //rare
                *px= *py = 0;
            }else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                if(s->mb_x==0){
                    *px = C[0];
                    *py = C[1];
                }else{
                    *px = mid_pred(A[0], 0, C[0]);
                    *py = mid_pred(A[1], 0, C[1]);
                }
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else if(block==1){
            if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
                C = mot_val[off[block] - wrap];
                *px = mid_pred(A[0], 0, C[0]);
                *py = mid_pred(A[1], 0, C[1]);
            }else{
                *px = A[0];
                *py = A[1];
            }
        }else{ /* block==2*/
            B = mot_val[ - wrap];
            C = mot_val[off[block] - wrap];
            if(s->mb_x == s->resync_mb_x) //rare
                A[0]=A[1]=0;

            *px = mid_pred(A[0], B[0], C[0]);
            *py = mid_pred(A[1], B[1], C[1]);
        }
    } else {
        B = mot_val[ - wrap];
        C = mot_val[off[block] - wrap];
        *px = mid_pred(A[0], B[0], C[0]);
        *py = mid_pred(A[1], B[1], C[1]);
    }
    return *mot_val;
}

/**
 * Get the GOB height (in macroblock rows) based on picture height.
 */
int ff_h263_get_gob_height(MpegEncContext *s){
    if (s->height <= 400)
        return 1;
    else if (s->height <= 800)
        return 2;
    else
        return 4;
}