/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )
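
/* For reference, a rough scalar sketch of the butterflies above (they follow
 * the structure of the H.264 4x4 inverse transform; nothing below relies on
 * these exact variable names):
 *   SUMSUB_BA(a,b):      a' = a + b;         b' = b - a;
 *   SUMSUBD2_AB(a,b,t):  b' = a + (b >> 1);  a' = (a >> 1) - b;
 * so one IDCT4_1D pass amounts to
 *   e0 = z0 + z2;  e1 = z0 - z2;  e2 = (z1 >> 1) - z3;  e3 = z1 + (z3 >> 1);
 *   out = { e0 + e3, e1 + e2, e1 - e2, e0 - e3 }   (up to register naming).
 */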

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */

#define STORE_DIFF_4P( p, t, z ) \
    "psraw      $6,     "#p" \n\t"\
    "movd       (%0),   "#t" \n\t"\
    "punpcklbw "#z",    "#t" \n\t"\
    "paddsw    "#t",    "#p" \n\t"\
    "packuswb  "#z",    "#p" \n\t"\
    "movd      "#p",    (%0) \n\t"

static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13  mm2=s02-s13  mm4=d02+d13  mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq      %0,    %%mm6 \n\t"
        /* in: 1,4,0,2  out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw     %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7    \n\t"
    :: "m"(ff_pw_32));

    asm volatile(
    STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}
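
/* Rough scalar view of the function above (a sketch, not used by the code):
 * after the two IDCT4_1D passes, each output sample is written as
 *   dst[x] = clip(dst[x] + ((res[x] + 32) >> 6), 0, 255)
 * where the +32 rounding is folded in via the ff_pw_32 operand between the
 * passes and the clip is the packuswb saturation in STORE_DIFF_4P.
 */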

static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        "movq 112(%0), %%mm7  \n\t"
        "movq  80(%0), %%mm5  \n\t"
        "movq  48(%0), %%mm3  \n\t"
        "movq  16(%0), %%mm1  \n\t"

        "movq   %%mm7, %%mm4  \n\t"
        "movq   %%mm3, %%mm6  \n\t"
        "movq   %%mm5, %%mm0  \n\t"
        "movq   %%mm7, %%mm2  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "psubw  %%mm7, %%mm0  \n\t"
        "psubw  %%mm6, %%mm2  \n\t"
        "psubw  %%mm4, %%mm0  \n\t"
        "psubw  %%mm3, %%mm2  \n\t"
        "psubw  %%mm3, %%mm0  \n\t"
        "paddw  %%mm1, %%mm2  \n\t"

        "movq   %%mm5, %%mm4  \n\t"
        "movq   %%mm1, %%mm6  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "paddw  %%mm5, %%mm4  \n\t"
        "paddw  %%mm1, %%mm6  \n\t"
        "paddw  %%mm7, %%mm4  \n\t"
        "paddw  %%mm5, %%mm6  \n\t"
        "psubw  %%mm1, %%mm4  \n\t"
        "paddw  %%mm3, %%mm6  \n\t"

        "movq   %%mm0, %%mm1  \n\t"
        "movq   %%mm4, %%mm3  \n\t"
        "movq   %%mm2, %%mm5  \n\t"
        "movq   %%mm6, %%mm7  \n\t"
        "psraw  $2,    %%mm6  \n\t"
        "psraw  $2,    %%mm3  \n\t"
        "psraw  $2,    %%mm5  \n\t"
        "psraw  $2,    %%mm0  \n\t"
        "paddw  %%mm6, %%mm1  \n\t"
        "paddw  %%mm2, %%mm3  \n\t"
        "psubw  %%mm4, %%mm5  \n\t"
        "psubw  %%mm0, %%mm7  \n\t"

        "movq  32(%0), %%mm2  \n\t"
        "movq  96(%0), %%mm6  \n\t"
        "movq   %%mm2, %%mm4  \n\t"
        "movq   %%mm6, %%mm0  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "psubw  %%mm0, %%mm4  \n\t"
        "paddw  %%mm2, %%mm6  \n\t"

        "movq    (%0), %%mm2  \n\t"
        "movq  64(%0), %%mm0  \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq   %%mm7,    %0   \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1)  \n\t"
            "movq   %%mm6, 24(%1)  \n\t"
            "movq   %%mm7, 40(%1)  \n\t"
            "movq   %%mm4, 56(%1)  \n\t"
            "movq    %0,    %%mm7  \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1)  \n\t"
            "movq   %%mm1, 16(%1)  \n\t"
            "movq   %%mm0, 32(%1)  \n\t"
            "movq   %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw     $6, %%mm7  \n\t"
            "psraw     $6, %%mm6  \n\t"
            "psraw     $6, %%mm5  \n\t"
            "psraw     $6, %%mm4  \n\t"
            "psraw     $6, %%mm3  \n\t"
            "psraw     $6, %%mm2  \n\t"
            "psraw     $6, %%mm1  \n\t"
            "psraw     $6, %%mm0  \n\t"

            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm5,  16(%0)  \n\t"
            "movq   %%mm3,  32(%0)  \n\t"
            "movq   %%mm1,  48(%0)  \n\t"
            "movq   %%mm0,  64(%0)  \n\t"
            "movq   %%mm2,  80(%0)  \n\t"
            "movq   %%mm4,  96(%0)  \n\t"
            "movq   %%mm6, 112(%0)  \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}

static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
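
/* The idea above, in scalar terms (a sketch): mm0 holds dc packed to unsigned
 * bytes (so it is 0 when dc < 0) and mm1 holds -dc packed the same way, which
 * lets dst[x] = clip(dst[x] + dc, 0, 255) be computed with only saturating
 * byte adds/subs and no unpacking to words.
 */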

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
    asm volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
    }
}


/***********************************/
/* deblocking */

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "por      "#t", "#o"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"

// out: o = |x-y|>a
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "psubusb  "#a", "#t"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"\
    "pcmpeqb  "#t", "#o"  \n\t"\

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb  %%mm4, %%mm4      \n\t"\
    "packuswb  %%mm5, %%mm5      \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    "pxor      %%mm6, %%mm6      \n\t"\
    "pcmpeqb   %%mm6, %%mm7      \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
        "movq    %%mm1              , %%mm5 \n\t"\
        "pxor    %%mm2              , %%mm5 \n\t" /* p0^q0*/\
        "pand    "#pb_01"           , %%mm5 \n\t" /* (p0^q0)&1*/\
        "pcmpeqb %%mm4              , %%mm4 \n\t"\
        "pxor    %%mm4              , %%mm3 \n\t"\
        "pavgb   %%mm0              , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
        "pavgb   "MANGLE(ff_pb_3)"  , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
        "pxor    %%mm1              , %%mm4 \n\t"\
        "pavgb   %%mm2              , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
        "pavgb   %%mm5              , %%mm3 \n\t"\
        "paddusb %%mm4              , %%mm3 \n\t" /* d+128+33*/\
        "movq    "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
        "psubusb %%mm3              , %%mm6 \n\t"\
        "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
        "pminub  %%mm7              , %%mm6 \n\t"\
        "pminub  %%mm7              , %%mm3 \n\t"\
        "psubusb %%mm6              , %%mm1 \n\t"\
        "psubusb %%mm3              , %%mm2 \n\t"\
        "paddusb %%mm3              , %%mm1 \n\t"\
        "paddusb %%mm6              , %%mm2 \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
        "movq     %%mm1,  "#tmp"   \n\t"\
        "pavgb    %%mm2,  "#tmp"   \n\t"\
        "pavgb    "#tmp", "#q2"    \n\t" /* avg(p2,avg(p0,q0)) */\
        "pxor   "q2addr", "#tmp"   \n\t"\
        "pand     %8,     "#tmp"   \n\t" /* (p2^avg(p0,q0))&1 */\
        "psubusb  "#tmp", "#q2"    \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
        "movq     "#p1",  "#tmp"   \n\t"\
        "psubusb  "#tc0", "#tmp"   \n\t"\
        "paddusb  "#p1",  "#tc0"   \n\t"\
        "pmaxub   "#tmp", "#q2"    \n\t"\
        "pminub   "#tc0", "#q2"    \n\t"\
        "movq     "#q2",  "q1addr" \n\t"

static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    asm volatile(
        "movq    (%1,%3), %%mm0    \n\t" //p1
        "movq    (%1,%3,2), %%mm1  \n\t" //p0
        "movq    (%2),    %%mm2    \n\t" //q0
        "movq    (%2,%3), %%mm3    \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd      %5,    %%mm4    \n\t"
        "punpcklbw %%mm4, %%mm4    \n\t"
        "punpcklwd %%mm4, %%mm4    \n\t"
        "pcmpeqb   %%mm3, %%mm3    \n\t"
        "movq      %%mm4, %%mm6    \n\t"
        "pcmpgtb   %%mm3, %%mm4    \n\t"
        "movq      %%mm6, 8+%0     \n\t"
        "pand      %%mm4, %%mm7    \n\t"
        "movq      %%mm7, %0       \n\t"

        /* filter p1 */
        "movq     (%1),   %%mm3    \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand     %%mm7,  %%mm6    \n\t" // mask & |p2-p0|<beta
        "pand     8+%0,   %%mm7    \n\t" // mask & tc0
        "movq     %%mm7,  %%mm4    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "pand     %%mm4,  %%mm6    \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq    (%2,%3,2), %%mm4  \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand     %0,     %%mm6    \n\t"
        "movq     8+%0,   %%mm5    \n\t" // can be merged with the and below, but that is slower
        "pand     %%mm6,  %%mm5    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "movq    (%2,%3), %%mm3    \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, unused)
        "movq      %%mm1, (%1,%3,2) \n\t"
        "movq      %%mm2, (%2)      \n\t"

        : "=m"(*tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(mm_bone)
    );
}
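
/* Note: mm7 starts as tc0&mask; each psubb with the 0/-1 masks for
 * |p2-p0|<beta and |q2-q0|<beta above adds 1 to it, so the p0/q0 step uses
 * tc = tc0 + (p1 filtered) + (q1 filtered), as the spec requires.
 */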

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}
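
/* h264_h_loop_filter_luma_mmx2() above simply reuses the row-based filter: it
 * transposes the 8x8 pixel area around the vertical edge into a temporary
 * buffer in 4x4 tiles, filters rows there, and transposes the four modified
 * middle columns back (hence the FIXME about a 6x8 transpose being enough).
 */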

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq    (%0),    %%mm0     \n\t" //p1
        "movq    (%0,%2), %%mm1     \n\t" //p0
        "movq    (%1),    %%mm2     \n\t" //q0
        "movq    (%1,%2), %%mm3     \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd      %3,    %%mm6     \n\t"
        "punpcklbw %%mm6, %%mm6     \n\t"
        "pand      %%mm6, %%mm7     \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq      %%mm1, (%0,%2)   \n\t"
        "movq      %%mm2, (%1)      \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq    "#p0", %%mm4  \n\t"\
    "pxor    "#q1", %%mm4  \n\t"\
    "pand   "#one", %%mm4  \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb   "#q1", "#p0"  \n\t"\
    "psubusb %%mm4, "#p0"  \n\t"\
    "pavgb   "#p1", "#p0"  \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */\

static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq    (%0),    %%mm0     \n\t"
        "movq    (%0,%2), %%mm1     \n\t"
        "movq    (%1),    %%mm2     \n\t"
        "movq    (%1,%2), %%mm3     \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq    %%mm1,   %%mm5     \n\t"
        "movq    %%mm2,   %%mm6     \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb   %%mm5,   %%mm1     \n\t"
        "psubb   %%mm6,   %%mm2     \n\t"
        "pand    %%mm7,   %%mm1     \n\t"
        "pand    %%mm7,   %%mm2     \n\t"
        "paddb   %%mm5,   %%mm1     \n\t"
        "paddb   %%mm6,   %%mm2     \n\t"
        "movq    %%mm1,   (%0,%2)   \n\t"
        "movq    %%mm2,   (%1)      \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
    int dir;
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            asm volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                asm volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    asm volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            asm volatile(
                "movd %0, %%mm1 \n\t"
                "por  %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            asm volatile(
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
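
/* In scalar terms the loop above builds, per 4-sample edge segment, roughly:
 *   bS = 2 if either side has nonzero coefficients (nnz),
 *   else 1 if the two sides use different reference frames or any mv
 *          component differs by 4 or more (quarter-pel units),
 *   else 0.
 * The stronger intra strengths (3/4) are assigned by the caller, not here.
 */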

/***********************************/
/* motion compensation */

#define QPEL_H264V(A,B,C,D,E,F,OP)\
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "psllw $2, %%mm6            \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "pmullw %4, %%mm6           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "paddw %5, "#A"             \n\t"\
        "paddw "#F", "#A"           \n\t"\
        "paddw "#A", %%mm6          \n\t"\
        "psraw $5, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)\
        "add %3, %1                 \n\t"

#define QPEL_H264HV(A,B,C,D,E,F,OF)\
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "psllw $2, %%mm6            \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "pmullw %3, %%mm6           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "paddw "#F", "#A"           \n\t"\
        "paddw "#A", %%mm6          \n\t"\
        "movq %%mm6, "#OF"(%1)      \n\t"

#define QPEL_H264(OPNAME, OP, MMX)\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=4;\
\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm4             \n\t"\
        "movq %6, %%mm5             \n\t"\
        "1:                         \n\t"\
        "movd  -1(%0), %%mm1        \n\t"\
        "movd    (%0), %%mm2        \n\t"\
        "movd   1(%0), %%mm3        \n\t"\
        "movd   2(%0), %%mm0        \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "paddw %%mm0, %%mm1         \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "movd  -2(%0), %%mm0        \n\t"\
        "movd   3(%0), %%mm3        \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm3, %%mm0         \n\t"\
        "psllw $2, %%mm2            \n\t"\
        "psubw %%mm1, %%mm2         \n\t"\
        "pmullw %%mm4, %%mm2        \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=4;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %0, %%mm4             \n\t"\
        "movq %1, %%mm5             \n\t"\
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
    );\
    do{\
    asm volatile(\
        "movd  -1(%0), %%mm1        \n\t"\
        "movd    (%0), %%mm2        \n\t"\
        "movd   1(%0), %%mm3        \n\t"\
        "movd   2(%0), %%mm0        \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "paddw %%mm0, %%mm1         \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "movd  -2(%0), %%mm0        \n\t"\
        "movd   3(%0), %%mm3        \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm3, %%mm0         \n\t"\
        "psllw $2, %%mm2            \n\t"\
        "psubw %%mm1, %%mm2         \n\t"\
        "pmullw %%mm4, %%mm2        \n\t"\
        "paddw %%mm5, %%mm0         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "movd   (%2), %%mm3         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        PAVGB" %%mm3, %%mm0         \n\t"\
        OP(%%mm0, (%1),%%mm6, d)\
        "add %4, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "add %3, %2                 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((long)src2Stride), "S"((long)dstStride)\
        : "memory"\
    );\
    }while(--h);\
}\
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    src -= 2*srcStride;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
         \
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    int h=4;\
    int w=3;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
             \
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        tmp += 4;\
        src += 4 - 9*srcStride;\
    }\
    tmp -= 3*4;\
    asm volatile(\
        "movq %4, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq     (%0), %%mm0       \n\t"\
        "paddw  10(%0), %%mm0       \n\t"\
        "movq    2(%0), %%mm1       \n\t"\
        "paddw   8(%0), %%mm1       \n\t"\
        "movq    4(%0), %%mm2       \n\t"\
        "paddw   6(%0), %%mm2       \n\t"\
        "psubw %%mm1, %%mm0         \n\t"/*a-b   (abccba)*/\
        "psraw $2, %%mm0            \n\t"/*(a-b)/4 */\
        "psubw %%mm1, %%mm0         \n\t"/*(a-b)/4-b */\
        "paddsw %%mm2, %%mm0        \n\t"\
        "psraw $2, %%mm0            \n\t"/*((a-b)/4-b+c)/4 */\
        "paddw %%mm6, %%mm2         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"/*(a-5*b+20*c)/16 +32 */\
        "psraw $6, %%mm0            \n\t"\
        "packuswb %%mm0, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm7, d)\
        "add $24, %0                \n\t"\
        "add %3, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq %6, %%mm5             \n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %0, %%mm6             \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
    asm volatile(\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq %5, %%mm5             \n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "movq (%2), %%mm4           \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        PAVGB" %%mm4, %%mm0         \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %4, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "add %3, %2                 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((long)src2Stride), "S"((long)dstStride),\
          "m"(ff_pw_16)\
        : "memory"\
    );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
        QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
        QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
         \
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
     );\
     if(h==16){\
        asm volatile(\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            \
           : "+a"(src), "+c"(dst)\
           : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
           : "memory"\
        );\
     }\
     src += 4-(h+5)*srcStride;\
     dst += 4-h*dstStride;\
   }\
}\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    int h = size;\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
    tmp -= size+8;\
    w = size>>4;\
    do{\
    h = size;\
    asm volatile(\
        "movq %4, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq     (%0), %%mm0       \n\t"\
        "movq    8(%0), %%mm3       \n\t"\
        "movq    2(%0), %%mm1       \n\t"\
        "movq   10(%0), %%mm4       \n\t"\
        "paddw   %%mm4, %%mm0       \n\t"\
        "paddw   %%mm3, %%mm1       \n\t"\
        "paddw  18(%0), %%mm3       \n\t"\
        "paddw  16(%0), %%mm4       \n\t"\
        "movq    4(%0), %%mm2       \n\t"\
        "movq   12(%0), %%mm5       \n\t"\
        "paddw   6(%0), %%mm2       \n\t"\
        "paddw  14(%0), %%mm5       \n\t"\
        "psubw %%mm1, %%mm0         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "psraw $2, %%mm0            \n\t"\
        "psraw $2, %%mm3            \n\t"\
        "psubw %%mm1, %%mm0         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "paddsw %%mm2, %%mm0        \n\t"\
        "paddsw %%mm5, %%mm3        \n\t"\
        "psraw $2, %%mm0            \n\t"\
        "psraw $2, %%mm3            \n\t"\
        "paddw %%mm6, %%mm2         \n\t"\
        "paddw %%mm6, %%mm5         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm5, %%mm3         \n\t"\
        "psraw $6, %%mm0            \n\t"\
        "psraw $6, %%mm3            \n\t"\
        "packuswb %%mm3, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm7, q)\
        "add $48, %0                \n\t"\
        "add %3, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
    tmp += 8 - size*24;\
    dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq       %5,  %%mm6          \n\t"\
        "movq      (%1), %%mm0          \n\t"\
        "movq    24(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm0, %%mm0          \n\t"\
        "packuswb %%mm1, %%mm1          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        PAVGB"  (%0,%3), %%mm1          \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea  (%0,%3,2), %0             \n\t"\
        "lea  (%2,%4,2), %2             \n\t"\
        "movq    48(%1), %%mm0          \n\t"\
        "movq    72(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm0, %%mm0          \n\t"\
        "packuswb %%mm1, %%mm1          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        PAVGB"  (%0,%3), %%mm1          \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq       %0,  %%mm6          \n\t"\
        ::"m"(ff_pw_16)\
        );\
    while(h--){\
    asm volatile(\
        "movq      (%1), %%mm0          \n\t"\
        "movq     8(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm1, %%mm0          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        OP(%%mm0, (%2), %%mm5, q)\
        ::"a"(src8), "c"(src16), "d"(dst)\
        :"memory");\
        src8 += src8Stride;\
        src16 += 24;\
        dst += dstStride;\
    }\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\


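/* The _mcXY_ names generated below follow the usual qpel convention: X is the
 * horizontal quarter-pel offset and Y the vertical one. For example mc20 is
 * the horizontal half-pel case, mc02 the vertical one, mc22 the centre
 * position, and mc10/mc30 average the half-pel result with the nearest
 * full-pel column (src or src+1).
 */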
#define H264_MC(OPNAME, SIZE, MMX) \
1242
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1243
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
1244
}\
1245
\
1246
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1247
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
1248
}\
1249
\
1250
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1251
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
1252
}\
1253
\
1254
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1255
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
1256
}\
1257
\
1258
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1259
    uint64_t temp[SIZE*SIZE/8];\
1260
    uint8_t * const half= (uint8_t*)temp;\
1261
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
1262
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
1263
}\
1264
\
1265
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1266
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
1267
}\
1268
\
1269
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1270
    uint64_t temp[SIZE*SIZE/8];\
1271
    uint8_t * const half= (uint8_t*)temp;\
1272
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
1273
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
1274
}\
1275
\
1276
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\

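/* Final write for the avg_ variants: average the computed prediction with the
 * pixels already in the destination, using pavgusb on 3DNow! CPUs and pavgb
 * on MMX2 CPUs (both are rounding unsigned byte averages). */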
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

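/* Instantiate the qpel helpers: PAVGB selects the byte-average instruction
 * used by the generated code, so each OPNAME/OP pair is built once for
 * 3DNow! and once for MMX2. */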
#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)
#undef PAVGB

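/* Expand H264_MC for every OPNAME/SIZE/CPU combination; e.g.
 * H264_MC(put_, 16, mmx2) emits put_h264_qpel16_mc00_mmx2 through
 * put_h264_qpel16_mc33_mmx2. */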
H264_MC(put_, 4, 3dnow)
H264_MC(put_, 8, 3dnow)
H264_MC(put_, 16,3dnow)
H264_MC(avg_, 4, 3dnow)
H264_MC(avg_, 8, 3dnow)
H264_MC(avg_, 16,3dnow)
H264_MC(put_, 4, mmx2)
H264_MC(put_, 8, mmx2)
H264_MC(put_, 16,mmx2)
H264_MC(avg_, 4, mmx2)
H264_MC(avg_, 8, mmx2)
H264_MC(avg_, 16,mmx2)

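/* Chroma motion compensation: dsputil_h264_template_mmx.c is included three
 * times with different H264_CHROMA_OP/OP4 macros to generate the put (no
 * final op), avg MMX2 (pavgb) and avg 3DNow! (pavgusb) variants of the
 * bilinear chroma MC functions. */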
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
1394
#define H264_CHROMA_OP4(S,D,T) "movd  " #S ", " #T " \n\t"\
1395
                               "pavgb " #T ", " #D " \n\t"
1396
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
1397
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
1398
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
1399
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
1400
#include "dsputil_h264_template_mmx.c"
1401
#undef H264_CHROMA_OP
1402
#undef H264_CHROMA_OP4
1403
#undef H264_CHROMA_MC8_TMPL
1404
#undef H264_CHROMA_MC4_TMPL
1405
#undef H264_CHROMA_MC2_TMPL
1406
#undef H264_CHROMA_MC8_MV0
1407

    
1408
#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
1409
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
1410
                               "pavgusb " #T ", " #D " \n\t"
1411
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
1412
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
1413
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
1414
#include "dsputil_h264_template_mmx.c"
1415
#undef H264_CHROMA_OP
1416
#undef H264_CHROMA_OP4
1417
#undef H264_CHROMA_MC8_TMPL
1418
#undef H264_CHROMA_MC4_TMPL
1419
#undef H264_CHROMA_MC8_MV0
1420

    
1421
/***********************************/
/* weighted prediction */

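/* Explicit weighted prediction on a WxH block. Per pixel this computes
 *     dst[x] = clip8((dst[x]*weight + offset') >> log2_denom)
 * where offset' = (offset << log2_denom) + ((1 << log2_denom) >> 1) is the
 * offset with the rounding constant folded in (see the first two statements
 * below), and clip8 denotes clamping to 0..255 (done by packuswb). */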
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    /* mm4 = weight and mm5 = offset broadcast to 4 words, mm6 = shift count, mm7 = 0 */
    asm volatile(
        "movd    %0, %%mm4        \n\t"
        "movd    %1, %%mm5        \n\t"
        "movd    %2, %%mm6        \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            /* 4 pixels from each of two rows: widen to words, scale, add the
             * rounded offset, shift right and pack back with saturation */
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm4, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm1 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "psraw     %%mm6, %%mm1 \n\t"
                "packuswb  %%mm7, %%mm0 \n\t"
                "packuswb  %%mm7, %%mm1 \n\t"
                "movd      %%mm0, %0    \n\t"
                "movd      %%mm1, %1    \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

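/* Explicit bidirectional weighted prediction: dst holds one prediction and
 * src the other. Per pixel this computes
 *     dst[x] = clip8((dst[x]*weightd + src[x]*weights + offset') >> (log2_denom+1))
 * where offset' = ((offset + 1) | 1) << log2_denom combines the signalled
 * offset with the rounding constant. */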
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    /* mm3 = weightd, mm4 = weights, mm5 = offset (broadcast), mm6 = shift count, mm7 = 0 */
    asm volatile(
        "movd    %0, %%mm3        \n\t"
        "movd    %1, %%mm4        \n\t"
        "movd    %2, %%mm5        \n\t"
        "movd    %3, %%mm6        \n\t"
        "pshufw  $0, %%mm3, %%mm3 \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm3, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm1, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                :  "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

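/* Non-inline wrappers with fixed block sizes for the WxH partitions used by
 * H.264 weighted prediction (16x16 down to 4x2), so the generic inline
 * routines above are specialized with compile-time constant W and H. */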
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)