Statistics
| Branch: | Revision:

ffmpeg / libavcodec / dsputil_internal.h @ 8dbe5856

History | View | Annotate | Download (55.8 KB)

1
/*
2
 * DSP utils
3
 * Copyright (c) 2000, 2001 Fabrice Bellard
4
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5
 *
6
 * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
7
 *
8
 * This file is part of FFmpeg.
9
 *
10
 * FFmpeg is free software; you can redistribute it and/or
11
 * modify it under the terms of the GNU Lesser General Public
12
 * License as published by the Free Software Foundation; either
13
 * version 2.1 of the License, or (at your option) any later version.
14
 *
15
 * FFmpeg is distributed in the hope that it will be useful,
16
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
18
 * Lesser General Public License for more details.
19
 *
20
 * You should have received a copy of the GNU Lesser General Public
21
 * License along with FFmpeg; if not, write to the Free Software
22
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23
 */
24

    
25
/**
26
 * @file
27
 * DSP utils
28
 */
29

    
30
#include "h264_high_depth.h"
31

    
32
static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
33
{
34
    int i;
35
    for(i=0; i<h; i++)
36
    {
37
        AV_WN2P(dst   , AV_RN2P(src   ));
38
        dst+=dstStride;
39
        src+=srcStride;
40
    }
41
}
42

    
43
static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
44
{
45
    int i;
46
    for(i=0; i<h; i++)
47
    {
48
        AV_WN4P(dst   , AV_RN4P(src   ));
49
        dst+=dstStride;
50
        src+=srcStride;
51
    }
52
}
53

    
54
static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
55
{
56
    int i;
57
    for(i=0; i<h; i++)
58
    {
59
        AV_WN4P(dst                , AV_RN4P(src                ));
60
        AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
61
        dst+=dstStride;
62
        src+=srcStride;
63
    }
64
}
65

    
66
static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
67
{
68
    int i;
69
    for(i=0; i<h; i++)
70
    {
71
        AV_WN4P(dst                 , AV_RN4P(src                 ));
72
        AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
73
        AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
74
        AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
75
        dst+=dstStride;
76
        src+=srcStride;
77
    }
78
}
79

    
80
/* draw the edges of width 'w' of an image of size width, height */
81
//FIXME check that this is ok for mpeg4 interlaced
82
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
/* Pads an image by replicating its border pixels outward: each row's first/last
 * pixel is copied 'w' times to the left/right, then whole padded rows are
 * replicated above (EDGE_TOP) and below (EDGE_BOTTOM), which also fills the
 * corners. p_wrap is the line stride in bytes. */
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int sides)
{
    pixel *buf = (pixel*)p_buf;
    int wrap = p_wrap / sizeof(pixel);  /* byte stride -> pixel stride */
    pixel *ptr, *last_line;
    int i;

    /* left and right */
    ptr = buf;
    for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
        /* multi-byte pixels: memset cannot replicate them, copy one by one */
        int j;
        for (j = 0; j < w; j++) {
            ptr[j-w] = ptr[0];
            ptr[j+width] = ptr[width-1];
        }
#else
        /* 8-bit pixels: memset replicates the edge byte */
        memset(ptr - w, ptr[0], w);
        memset(ptr + width, ptr[width-1], w);
#endif
        ptr += wrap;
    }

    /* top and bottom + corners */
    buf -= w;  /* include the already-padded left margin in the row copies */
    last_line = buf + (height - 1) * wrap;
    if (sides & EDGE_TOP)
        for(i = 0; i < w; i++)
            memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
    if (sides & EDGE_BOTTOM)
        for (i = 0; i < w; i++)
            memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
115

    
116
/**
117
 * Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
118
 * @param buf destination buffer
119
 * @param src source buffer
120
 * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
121
 * @param block_w width of block
122
 * @param block_h height of block
123
 * @param src_x x coordinate of the top left sample of the block in the source buffer
124
 * @param src_y y coordinate of the top left sample of the block in the source buffer
125
 * @param w width of the source buffer
126
 * @param h height of the source buffer
127
 */
128
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
                                    int src_x, int src_y, int w, int h){
    int x, y;
    int start_y, start_x, end_y, end_x;

    /* Clamp src_y/src_x so the block overlaps the source by at least one
     * row/column; src is moved along with the clamped coordinate. */
    if(src_y>= h){
        src+= (h-1-src_y)*linesize;
        src_y=h-1;
    }else if(src_y<=-block_h){
        src+= (1-block_h-src_y)*linesize;
        src_y=1-block_h;
    }
    if(src_x>= w){
        src+= (w-1-src_x)*sizeof(pixel);
        src_x=w-1;
    }else if(src_x<=-block_w){
        src+= (1-block_w-src_x)*sizeof(pixel);
        src_x=1-block_w;
    }

    /* [start, end) is the part of the block covered by real source samples. */
    start_y= FFMAX(0, -src_y);
    start_x= FFMAX(0, -src_x);
    end_y= FFMIN(block_h, h-src_y);
    end_x= FFMIN(block_w, w-src_x);
    assert(start_y < end_y && block_h);
    assert(start_x < end_x && block_w);

    w    = end_x - start_x;  /* reuse w as the width of the valid region */
    src += start_y*linesize + start_x*sizeof(pixel);
    buf += start_x*sizeof(pixel);

    //top: replicate the first valid row upward
    for(y=0; y<start_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    // copy existing part
    for(; y<end_y; y++){
        memcpy(buf, src, w*sizeof(pixel));
        src += linesize;
        buf += linesize;
    }

    //bottom: replicate the last valid row downward
    src -= linesize;
    for(; y<block_h; y++){
        memcpy(buf, src, w*sizeof(pixel));
        buf += linesize;
    }

    /* second pass: replicate left/right edge samples across every row */
    buf -= block_h * linesize + start_x*sizeof(pixel);
    while (block_h--){
        pixel *bufp = (pixel*)buf;
       //left
        for(x=0; x<start_x; x++){
            bufp[x] = bufp[start_x];
        }

       //right
        for(x=end_x; x<block_w; x++){
            bufp[x] = bufp[end_x - 1];
        }
        buf += linesize;
    }
}
194

    
195
/**
 * Add an 8x8 block of DCT coefficients onto the destination pixels.
 * line_size is in bytes; it is converted to a pixel stride
 * (>> sizeof(pixel)-1 covers the 1- and 2-byte pixel formats).
 */
static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
    int row, col;
    pixel *restrict pixels = (pixel *restrict)p_pixels;
    dctcoef *block = (dctcoef*)p_block;
    line_size >>= sizeof(pixel)-1;

    for (row = 0; row < 8; row++) {
        for (col = 0; col < 8; col++)
            pixels[col] += block[col];
        pixels += line_size;
        block  += 8;
    }
}
215

    
216
/**
 * Add a 4x4 block of DCT coefficients onto the destination pixels.
 * line_size is in bytes; it is converted to a pixel stride
 * (>> sizeof(pixel)-1 covers the 1- and 2-byte pixel formats).
 */
static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
    int row, col;
    pixel *restrict pixels = (pixel *restrict)p_pixels;
    dctcoef *block = (dctcoef*)p_block;
    line_size >>= sizeof(pixel)-1;

    for (row = 0; row < 4; row++) {
        for (col = 0; col < 4; col++)
            pixels[col] += block[col];
        pixels += line_size;
        block  += 4;
    }
}
232

    
233
#if 0
/* NOTE(review): disabled 64-bit uint64 variant of the PIXOP2 template below.
 * Never compiled (#if 0); kept for reference only. */

#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((uint64_t*)block), AV_RN64(pixels));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    int i;\
    for(i=0; i<h; i++){\
        const uint64_t a= AV_RN64(pixels          );\
        const uint64_t b= AV_RN64(pixels+line_size);\
        OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
        int i;\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        uint64_t l0=  (a&0x0303030303030303ULL)\
                    + (b&0x0303030303030303ULL)\
                    + 0x0202020202020202ULL;\
        uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
                   + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        uint64_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint64_t a= AV_RN64(pixels  );\
            uint64_t b= AV_RN64(pixels+1);\
            l1=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL);\
            h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN64(pixels  );\
            b= AV_RN64(pixels+1);\
            l0=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0202020202020202ULL;\
            h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
}\
\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
        int i;\
        const uint64_t a= AV_RN64(pixels  );\
        const uint64_t b= AV_RN64(pixels+1);\
        uint64_t l0=  (a&0x0303030303030303ULL)\
                    + (b&0x0303030303030303ULL)\
                    + 0x0101010101010101ULL;\
        uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
                   + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
        uint64_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint64_t a= AV_RN64(pixels  );\
            uint64_t b= AV_RN64(pixels+1);\
            l1=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL);\
            h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN64(pixels  );\
            b= AV_RN64(pixels+1);\
            l0=  (a&0x0303030303030303ULL)\
               + (b&0x0303030303030303ULL)\
               + 0x0101010101010101ULL;\
            h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
              + ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
            OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c    , OPNAME ## _pixels_c    , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel))

#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant
376

    
377
/**
 * Template generating the full family of full/half-pel pixel copy and
 * average primitives for one OP (see op_put/op_avg below):
 *   - OPNAME_pixels{2,4,8,16}:       straight block copy/average
 *   - *_l2 / *_l4:                   average of 2 / 4 source blocks
 *   - *_x2 / *_y2 / *_xy2:           horizontal / vertical / diagonal
 *                                    half-pel interpolation
 *   - *_no_rnd_*:                    variants rounding down instead of up
 *                                    (rounding bias 0x01… instead of 0x02…)
 * The 32-bit paths marked "FIXME HIGH BIT DEPTH" operate on packed bytes
 * and are only correct for 8-bit pixels.
 */
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel2*)(block  )), AV_RN2P(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block  )), AV_RN4P(pixels  ));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    int i;\
    for(i=0; i<h; i++){\
        OP(*((pixel4*)(block                )), AV_RN4P(pixels                ));\
        OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
        pixels+=line_size;\
        block +=line_size;\
    }\
}\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1  ]);\
        b= AV_RN4P(&src2[i*src_stride2  ]);\
        OP(*((pixel4*)&dst[i*dst_stride  ]), no_rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1  ]);\
        b= AV_RN4P(&src2[i*src_stride2  ]);\
        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
        a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
        b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
        OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN4P(&src1[i*src_stride1  ]);\
        b= AV_RN4P(&src2[i*src_stride2  ]);\
        OP(*((pixel4*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    int i;\
    for(i=0; i<h; i++){\
        pixel4 a,b;\
        a= AV_RN2P(&src1[i*src_stride1  ]);\
        b= AV_RN2P(&src2[i*src_stride2  ]);\
        OP(*((pixel2*)&dst[i*dst_stride  ]), rnd_avg_pixel4(a, b));\
    }\
}\
\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _pixels8_l2)(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
                                                int src_stride1, int src_stride2, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst  , src1  , src2  , dst_stride, src_stride1, src_stride2, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH */\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x02020202UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    /* FIXME HIGH BIT DEPTH*/\
    int i;\
    for(i=0; i<h; i++){\
        uint32_t a, b, c, d, l0, l1, h0, h1;\
        a= AV_RN32(&src1[i*src_stride1]);\
        b= AV_RN32(&src2[i*src_stride2]);\
        c= AV_RN32(&src3[i*src_stride3]);\
        d= AV_RN32(&src4[i*src_stride4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
        a= AV_RN32(&src1[i*src_stride1+4]);\
        b= AV_RN32(&src2[i*src_stride2+4]);\
        c= AV_RN32(&src3[i*src_stride3+4]);\
        d= AV_RN32(&src4[i*src_stride4+4]);\
        l0=  (a&0x03030303UL)\
           + (b&0x03030303UL)\
           + 0x01010101UL;\
        h0= ((a&0xFCFCFCFCUL)>>2)\
          + ((b&0xFCFCFCFCUL)>>2);\
        l1=  (c&0x03030303UL)\
           + (d&0x03030303UL);\
        h1= ((c&0xFCFCFCFCUL)>>2)\
          + ((d&0xFCFCFCFCUL)>>2);\
        OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
    }\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _pixels8_l4)(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
                 int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst  , src1  , src2  , src3  , src4  , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
    FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
{\
        int i, a0, b0, a1, b1;\
        pixel *block = (pixel*)p_block;\
        const pixel *pixels = (const pixel*)p_pixels;\
        line_size >>= sizeof(pixel)-1;\
        a0= pixels[0];\
        b0= pixels[1] + 2;\
        a0 += b0;\
        b0 += pixels[2];\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            a1= pixels[0];\
            b1= pixels[1];\
            a1 += b1;\
            b1 += pixels[2];\
\
            block[0]= (a1+a0)>>2; /* FIXME non put */\
            block[1]= (b1+b0)>>2;\
\
            pixels+=line_size;\
            block +=line_size;\
\
            a0= pixels[0];\
            b0= pixels[1] + 2;\
            a0 += b0;\
            b0 += pixels[2];\
\
            block[0]= (a1+a0)>>2;\
            block[1]= (b1+b0)>>2;\
            pixels+=line_size;\
            block +=line_size;\
        }\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
        /* FIXME HIGH BIT DEPTH */\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
                    + (b&0x03030303UL)\
                    + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
                    + (b&0x03030303UL)\
                    + 0x02020202UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x02020202UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
    /* FIXME HIGH BIT DEPTH */\
    int j;\
    for(j=0; j<2; j++){\
        int i;\
        const uint32_t a= AV_RN32(pixels  );\
        const uint32_t b= AV_RN32(pixels+1);\
        uint32_t l0=  (a&0x03030303UL)\
                    + (b&0x03030303UL)\
                    + 0x01010101UL;\
        uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
                   + ((b&0xFCFCFCFCUL)>>2);\
        uint32_t l1,h1;\
\
        pixels+=line_size;\
        for(i=0; i<h; i+=2){\
            uint32_t a= AV_RN32(pixels  );\
            uint32_t b= AV_RN32(pixels+1);\
            l1=  (a&0x03030303UL)\
               + (b&0x03030303UL);\
            h1= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
            a= AV_RN32(pixels  );\
            b= AV_RN32(pixels+1);\
            l0=  (a&0x03030303UL)\
               + (b&0x03030303UL)\
               + 0x01010101UL;\
            h0= ((a&0xFCFCFCFCUL)>>2)\
              + ((b&0xFCFCFCFCUL)>>2);\
            OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
            pixels+=line_size;\
            block +=line_size;\
        }\
        pixels+=4-line_size*(h+1);\
        block +=4-line_size*h;\
    }\
}\
\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16)    , FUNCC(OPNAME ## _pixels8)    , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16)    , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
750

    
751
/* OP plugged into PIXOP2: "avg" averages with the destination, "put" stores. */
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#endif
#define op_put(a, b) a = b

/* Instantiate the full put_* and avg_* function families. */
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put

/* For plain full-pel copies, no-rounding put is identical to put. */
#define put_no_rnd_pixels8_c  put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
762

    
763
static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
764
    FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
765
}
766

    
767
static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
768
    FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
769
}
770

    
771
/*
 * H.264 chroma motion compensation (bilinear 1/8-pel interpolation) for
 * block widths 2, 4 and 8.  x,y are the eighth-pel fractional offsets
 * (0..7); the four weights A..D always sum to 64, so OP is expected to
 * apply the (v + 32) >> 6 normalisation (see op_put/op_avg below).
 * When D == 0 the filter degenerates to a 1-D blend along one axis
 * (step selects vertical vs horizontal).  The stride shift converts a
 * byte stride into a pixel stride for the current BIT_DEPTH.
 */
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride >>= sizeof(pixel)-1;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride >>= sizeof(pixel)-1;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    const int A=(8-x)*(8-y);\
    const int B=(  x)*(8-y);\
    const int C=(8-x)*(  y);\
    const int D=(  x)*(  y);\
    int i;\
    stride >>= sizeof(pixel)-1;\
    \
    assert(x<8 && y<8 && x>=0 && y>=0);\
\
    if(D){\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
            OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
            OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
            OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
            OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
            OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
            OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
            OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
            dst+= stride;\
            src+= stride;\
        }\
    }else{\
        const int E= B+C;\
        const int step= C ? stride : 1;\
        for(i=0; i<h; i++){\
            OP(dst[0], (A*src[0] + E*src[step+0]));\
            OP(dst[1], (A*src[1] + E*src[step+1]));\
            OP(dst[2], (A*src[2] + E*src[step+2]));\
            OP(dst[3], (A*src[3] + E*src[step+3]));\
            OP(dst[4], (A*src[4] + E*src[step+4]));\
            OP(dst[5], (A*src[5] + E*src[step+5]));\
            OP(dst[6], (A*src[6] + E*src[step+6]));\
            OP(dst[7], (A*src[7] + E*src[step+7]));\
            dst+= stride;\
            src+= stride;\
        }\
    }\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
882
#define op_put(a, b) a = (((b) + 32)>>6)
883

    
884
H264_CHROMA_MC(put_       , op_put)
885
H264_CHROMA_MC(avg_       , op_avg)
886
#undef op_avg
887
#undef op_put
888

    
889
#if 1
/*
 * H.264 luma half-pel interpolation: the 6-tap (1,-5,20,20,-5,1)
 * low-pass filter, instantiated for block sizes 2, 4, 8 and 16.
 *   *_h_lowpass  - horizontal filter, OP applies (v + 16) >> 5 + clip.
 *   *_v_lowpass  - vertical filter, same normalisation.
 *   *_hv_lowpass - horizontal pass into the 16-bit tmp buffer, then a
 *                  vertical pass over tmp; OP2 applies (v + 512) >> 10.
 *                  For BIT_DEPTH > 9 the intermediate values can exceed
 *                  int16_t range, so a negative bias `pad` is added when
 *                  storing into tmp and subtracted when reading back.
 * Byte strides are converted to pixel strides via >>= sizeof(pixel)-1.
 * The 16-wide versions are composed from four 8-wide calls.
 */
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int h=2;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int w=2;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        dst++;\
        src++;\
    }\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    const int h=2;\
    const int w=2;\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        dst++;\
        tmp++;\
    }\
}\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int h=4;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int w=4;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        dst++;\
        src++;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    const int h=4;\
    const int w=4;\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        dst++;\
        tmp++;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int h=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<h; i++)\
    {\
        OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
        OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
        OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
        OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
        OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
        OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
        OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
        OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
    const int w=8;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    for(i=0; i<w; i++)\
    {\
        const int srcB= src[-2*srcStride];\
        const int srcA= src[-1*srcStride];\
        const int src0= src[0 *srcStride];\
        const int src1= src[1 *srcStride];\
        const int src2= src[2 *srcStride];\
        const int src3= src[3 *srcStride];\
        const int src4= src[4 *srcStride];\
        const int src5= src[5 *srcStride];\
        const int src6= src[6 *srcStride];\
        const int src7= src[7 *srcStride];\
        const int src8= src[8 *srcStride];\
        const int src9= src[9 *srcStride];\
        const int src10=src[10*srcStride];\
        OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
        OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
        OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
        OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
        OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
        OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
        OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
        OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
        dst++;\
        src++;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
    const int h=8;\
    const int w=8;\
    const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
    INIT_CLIP\
    int i;\
    pixel *dst = (pixel*)p_dst;\
    pixel *src = (pixel*)p_src;\
    dstStride >>= sizeof(pixel)-1;\
    srcStride >>= sizeof(pixel)-1;\
    src -= 2*srcStride;\
    for(i=0; i<h+5; i++)\
    {\
        tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
        tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
        tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
        tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
        tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
        tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
        tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
        tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
        tmp+=tmpStride;\
        src+=srcStride;\
    }\
    tmp -= tmpStride*(h+5-2);\
    for(i=0; i<w; i++)\
    {\
        const int tmpB= tmp[-2*tmpStride] - pad;\
        const int tmpA= tmp[-1*tmpStride] - pad;\
        const int tmp0= tmp[0 *tmpStride] - pad;\
        const int tmp1= tmp[1 *tmpStride] - pad;\
        const int tmp2= tmp[2 *tmpStride] - pad;\
        const int tmp3= tmp[3 *tmpStride] - pad;\
        const int tmp4= tmp[4 *tmpStride] - pad;\
        const int tmp5= tmp[5 *tmpStride] - pad;\
        const int tmp6= tmp[6 *tmpStride] - pad;\
        const int tmp7= tmp[7 *tmpStride] - pad;\
        const int tmp8= tmp[8 *tmpStride] - pad;\
        const int tmp9= tmp[9 *tmpStride] - pad;\
        const int tmp10=tmp[10*tmpStride] - pad;\
        OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
        OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
        OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
        OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
        OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
        OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
        OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
        OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
        dst++;\
        tmp++;\
    }\
}\
\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst                , src                , dstStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst                , tmp  , src                , dstStride, tmpStride, srcStride);\
    FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\

/*
 * H.264 luma quarter-pel motion compensation, built from the lowpass
 * primitives above.  _mcXY names the fractional position: X = horizontal
 * quarter-pel offset (0..3), Y = vertical.  Positions on the half-pel
 * grid call a lowpass filter directly; quarter-pel positions average the
 * nearest half-pel result with a neighbouring full/half-pel sample via
 * the *_l2 helpers.  `full` holds a copy of src with the 2-row top and
 * 3-row bottom borders the vertical filter needs; `tmp` is the int16_t
 * scratch for the hv (centre) path.  All scratch buffers are sized in
 * bytes, hence the sizeof(pixel) factors.
 */
#define H264_MC(OPNAME, SIZE) \
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
    FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t half[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
    uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
    int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
    uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
    uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
    FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel),  stride, SIZE + 5);\
    FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
    FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
    FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\

#define op_avg(a, b)  a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
1331
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
1332
#define op_put(a, b)  a = CLIP(((b) + 16)>>5)
1333
#define op2_avg(a, b)  a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
1334
#define op2_put(a, b)  a = CLIP(((b) + 512)>>10)
1335

    
1336
H264_LOWPASS(put_       , op_put, op2_put)
1337
H264_LOWPASS(avg_       , op_avg, op2_avg)
1338
H264_MC(put_, 2)
1339
H264_MC(put_, 4)
1340
H264_MC(put_, 8)
1341
H264_MC(put_, 16)
1342
H264_MC(avg_, 4)
1343
H264_MC(avg_, 8)
1344
H264_MC(avg_, 16)
1345

    
1346
#undef op_avg
1347
#undef op_put
1348
#undef op2_avg
1349
#undef op2_put
1350
#endif
1351

    
1352
/* The mc00 (full-pel) qpel functions are plain block copies/averages, so
 * alias them to the shared ff_*_pixelsNxN helpers for this bit depth. */
#if BIT_DEPTH == 8
#   define put_h264_qpel8_mc00_8_c  ff_put_pixels8x8_8_c
#   define avg_h264_qpel8_mc00_8_c  ff_avg_pixels8x8_8_c
#   define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
#   define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
#   define put_h264_qpel8_mc00_9_c  ff_put_pixels8x8_9_c
#   define avg_h264_qpel8_mc00_9_c  ff_avg_pixels8x8_9_c
#   define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
#   define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
#   define put_h264_qpel8_mc00_10_c  ff_put_pixels8x8_10_c
#   define avg_h264_qpel8_mc00_10_c  ff_avg_pixels8x8_10_c
#   define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
#   define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#endif
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
1370
    FUNCC(put_pixels8)(dst, src, stride, 8);
1371
}
1372
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
1373
    FUNCC(avg_pixels8)(dst, src, stride, 8);
1374
}
1375
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
1376
    FUNCC(put_pixels16)(dst, src, stride, 16);
1377
}
1378
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
1379
    FUNCC(avg_pixels16)(dst, src, stride, 16);
1380
}
1381

    
1382
/* Zero one 8x8 coefficient block (64 dctcoef-sized elements). */
static void FUNCC(clear_block)(DCTELEM *block)
{
    memset(block, 0, sizeof(dctcoef)*64);
}
/**
1388
 * memset(blocks, 0, sizeof(DCTELEM)*6*64)
1389
 */
1390
static void FUNCC(clear_blocks)(DCTELEM *blocks)
1391
{
1392
    memset(blocks, 0, sizeof(dctcoef)*6*64);
1393
}