ffmpeg / libavcodec / i386 / h264dsp_mmx.c @ ce611a27

/*
 * Copyright (c) 2004-2005 Michael Niedermayer, Loren Merritt
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */


/***********************************/
/* IDCT */

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SUMSUB_BADC( a, b, c, d ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#d", "#c" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "paddw "#d", "#d" \n\t"\
    "psubw "#a", "#b" \n\t"\
    "psubw "#c", "#d" \n\t"

#define SUMSUBD2_AB( a, b, t ) \
    "movq  "#b", "#t" \n\t"\
    "psraw  $1 , "#b" \n\t"\
    "paddw "#a", "#b" \n\t"\
    "psraw  $1 , "#a" \n\t"\
    "psubw "#t", "#a" \n\t"

#define IDCT4_1D( s02, s13, d02, d13, t ) \
    SUMSUB_BA  ( s02, d02 )\
    SUMSUBD2_AB( s13, d13, t )\
    SUMSUB_BADC( d13, s02, s13, d02 )
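/* Scalar sketch of IDCT4_1D per 16-bit lane (explanatory note, not part of
 * the original code). With z0 in d02, z1 in s13, z2 in s02, z3 in d13:
 *     e0 = z0 + z2;          e1 = z0 - z2;
 *     e2 = (z1>>1) - z3;     e3 = z1 + (z3>>1);
 *     d13 = e0 + e3;  s13 = e1 + e2;  d02 = e1 - e2;  s02 = e0 - e3;
 */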

#define STORE_DIFF_4P( p, t, z ) \
    "psraw      $6,     "#p" \n\t"\
    "movd       (%0),   "#t" \n\t"\
    "punpcklbw "#z",    "#t" \n\t"\
    "paddsw    "#t",    "#p" \n\t"\
    "packuswb  "#z",    "#p" \n\t"\
    "movd      "#p",    (%0) \n\t"

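/* STORE_DIFF_4P above: per pixel, dst[x] = av_clip_uint8(dst[x] + (p[x] >> 6));
 * the +32 rounding bias has already been folded into p. (Explanatory note.) */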
static void ff_h264_idct_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    /* Load dct coeffs */
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
    :: "r"(block) );

    asm volatile(
        /* mm1=s02+s13  mm2=s02-s13  mm4=d02+d13  mm0=d02-d13 */
        IDCT4_1D( %%mm2, %%mm1, %%mm0, %%mm3, %%mm4 )

        "movq      %0,    %%mm6 \n\t"
        /* in: 1,4,0,2  out: 1,2,3,0 */
        TRANSPOSE4( %%mm3, %%mm1, %%mm0, %%mm2, %%mm4 )

        "paddw     %%mm6, %%mm3 \n\t"

        /* mm2=s02+s13  mm3=s02-s13  mm4=d02+d13  mm1=d02-d13 */
        IDCT4_1D( %%mm4, %%mm2, %%mm3, %%mm0, %%mm1 )

        "pxor %%mm7, %%mm7    \n\t"
    :: "m"(ff_pw_32));

    asm volatile(
    STORE_DIFF_4P( %%mm0, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm2, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm3, %%mm1, %%mm7)
        "add %1, %0             \n\t"
    STORE_DIFF_4P( %%mm4, %%mm1, %%mm7)
        : "+r"(dst)
        : "r" ((long)stride)
    );
}

static inline void h264_idct8_1d(int16_t *block)
{
    asm volatile(
        "movq 112(%0), %%mm7  \n\t"
        "movq  80(%0), %%mm5  \n\t"
        "movq  48(%0), %%mm3  \n\t"
        "movq  16(%0), %%mm1  \n\t"

        "movq   %%mm7, %%mm4  \n\t"
        "movq   %%mm3, %%mm6  \n\t"
        "movq   %%mm5, %%mm0  \n\t"
        "movq   %%mm7, %%mm2  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "psubw  %%mm7, %%mm0  \n\t"
        "psubw  %%mm6, %%mm2  \n\t"
        "psubw  %%mm4, %%mm0  \n\t"
        "psubw  %%mm3, %%mm2  \n\t"
        "psubw  %%mm3, %%mm0  \n\t"
        "paddw  %%mm1, %%mm2  \n\t"

        "movq   %%mm5, %%mm4  \n\t"
        "movq   %%mm1, %%mm6  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "paddw  %%mm5, %%mm4  \n\t"
        "paddw  %%mm1, %%mm6  \n\t"
        "paddw  %%mm7, %%mm4  \n\t"
        "paddw  %%mm5, %%mm6  \n\t"
        "psubw  %%mm1, %%mm4  \n\t"
        "paddw  %%mm3, %%mm6  \n\t"

        "movq   %%mm0, %%mm1  \n\t"
        "movq   %%mm4, %%mm3  \n\t"
        "movq   %%mm2, %%mm5  \n\t"
        "movq   %%mm6, %%mm7  \n\t"
        "psraw  $2,    %%mm6  \n\t"
        "psraw  $2,    %%mm3  \n\t"
        "psraw  $2,    %%mm5  \n\t"
        "psraw  $2,    %%mm0  \n\t"
        "paddw  %%mm6, %%mm1  \n\t"
        "paddw  %%mm2, %%mm3  \n\t"
        "psubw  %%mm4, %%mm5  \n\t"
        "psubw  %%mm0, %%mm7  \n\t"

        "movq  32(%0), %%mm2  \n\t"
        "movq  96(%0), %%mm6  \n\t"
        "movq   %%mm2, %%mm4  \n\t"
        "movq   %%mm6, %%mm0  \n\t"
        "psraw  $1,    %%mm4  \n\t"
        "psraw  $1,    %%mm6  \n\t"
        "psubw  %%mm0, %%mm4  \n\t"
        "paddw  %%mm2, %%mm6  \n\t"

        "movq    (%0), %%mm2  \n\t"
        "movq  64(%0), %%mm0  \n\t"
        SUMSUB_BA( %%mm0, %%mm2 )
        SUMSUB_BA( %%mm6, %%mm0 )
        SUMSUB_BA( %%mm4, %%mm2 )
        SUMSUB_BA( %%mm7, %%mm6 )
        SUMSUB_BA( %%mm5, %%mm4 )
        SUMSUB_BA( %%mm3, %%mm2 )
        SUMSUB_BA( %%mm1, %%mm0 )
        :: "r"(block)
    );
}
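/* h264_idct8_1d above: one 8-element pass of the H.264 8x8 inverse
 * transform. The odd coefficients (rows 1,3,5,7, loaded first) and the even
 * coefficients (rows 0,2,4,6) are combined with the spec's >>1 / >>2 scaled
 * taps, then merged by the SUMSUB_BA butterflies. (Explanatory note.) */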

static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
    int i;
    int16_t __attribute__ ((aligned(8))) b2[64];

    block[0] += 32;

    for(i=0; i<2; i++){
        DECLARE_ALIGNED_8(uint64_t, tmp);

        h264_idct8_1d(block+4*i);

        asm volatile(
            "movq   %%mm7,    %0   \n\t"
            TRANSPOSE4( %%mm0, %%mm2, %%mm4, %%mm6, %%mm7 )
            "movq   %%mm0,  8(%1)  \n\t"
            "movq   %%mm6, 24(%1)  \n\t"
            "movq   %%mm7, 40(%1)  \n\t"
            "movq   %%mm4, 56(%1)  \n\t"
            "movq    %0,    %%mm7  \n\t"
            TRANSPOSE4( %%mm7, %%mm5, %%mm3, %%mm1, %%mm0 )
            "movq   %%mm7,   (%1)  \n\t"
            "movq   %%mm1, 16(%1)  \n\t"
            "movq   %%mm0, 32(%1)  \n\t"
            "movq   %%mm3, 48(%1)  \n\t"
            : "=m"(tmp)
            : "r"(b2+32*i)
            : "memory"
        );
    }

    for(i=0; i<2; i++){
        h264_idct8_1d(b2+4*i);

        asm volatile(
            "psraw     $6, %%mm7  \n\t"
            "psraw     $6, %%mm6  \n\t"
            "psraw     $6, %%mm5  \n\t"
            "psraw     $6, %%mm4  \n\t"
            "psraw     $6, %%mm3  \n\t"
            "psraw     $6, %%mm2  \n\t"
            "psraw     $6, %%mm1  \n\t"
            "psraw     $6, %%mm0  \n\t"

            "movq   %%mm7,    (%0)  \n\t"
            "movq   %%mm5,  16(%0)  \n\t"
            "movq   %%mm3,  32(%0)  \n\t"
            "movq   %%mm1,  48(%0)  \n\t"
            "movq   %%mm0,  64(%0)  \n\t"
            "movq   %%mm2,  80(%0)  \n\t"
            "movq   %%mm4,  96(%0)  \n\t"
            "movq   %%mm6, 112(%0)  \n\t"
            :: "r"(b2+4*i)
            : "memory"
        );
    }

    add_pixels_clamped_mmx(b2, dst, stride);
}
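/* ff_h264_idct8_add_mmx above works in two 4x8 halves because an MMX
 * register holds only four 16-bit coefficients; adding 32 to block[0] up
 * front makes the final ">>6" round to nearest. (Explanatory note.) */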

static void ff_h264_idct_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    asm volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    asm volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dst+0*stride)),
         "+m"(*(uint32_t*)(dst+1*stride)),
         "+m"(*(uint32_t*)(dst+2*stride)),
         "+m"(*(uint32_t*)(dst+3*stride))
    );
}
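/* The dc_add functions split dc into a saturating add of max(dc,0) and a
 * saturating subtract of max(-dc,0): packuswb clamps dc to [0,255] in mm0
 * and -dc in mm1, so the paddusb/psubusb pair implements
 * dst[x] = av_clip_uint8(dst[x] + dc) without unpacking to 16 bits.
 * (Explanatory note.) */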

static void ff_h264_idct8_dc_add_mmx2(uint8_t *dst, int16_t *block, int stride)
{
    int dc = (block[0] + 32) >> 6;
    int y;
    asm volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    for(y=2; y--; dst += 4*stride){
    asm volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dst+0*stride)),
         "+m"(*(uint64_t*)(dst+1*stride)),
         "+m"(*(uint64_t*)(dst+2*stride)),
         "+m"(*(uint64_t*)(dst+3*stride))
    );
    }
}


/***********************************/
/* deblocking */

// out: o = sat(|x-y| - a), i.e. nonzero where |x-y| > a
// clobbers: t
#define DIFF_GT_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "por      "#t", "#o"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"

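/* por of the two unsigned saturated differences gives |x-y| exactly, since
 * one of sat(x-y), sat(y-x) is always zero; the final psubusb leaves a
 * nonzero byte only where |x-y| > a. (Explanatory note.) */
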
// out: o = (|x-y| > a) ? 0 : 0xff  (note the inverted sense vs DIFF_GT_MMX)
// clobbers: t
#define DIFF_GT2_MMX(x,y,a,o,t)\
    "movq     "#y", "#t"  \n\t"\
    "movq     "#x", "#o"  \n\t"\
    "psubusb  "#x", "#t"  \n\t"\
    "psubusb  "#y", "#o"  \n\t"\
    "psubusb  "#a", "#t"  \n\t"\
    "psubusb  "#a", "#o"  \n\t"\
    "pcmpeqb  "#t", "#o"  \n\t"

// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1
// out: mm5=beta-1, mm7=mask
// clobbers: mm4,mm6
#define H264_DEBLOCK_MASK(alpha1, beta1) \
    "pshufw $0, "#alpha1", %%mm4 \n\t"\
    "pshufw $0, "#beta1 ", %%mm5 \n\t"\
    "packuswb  %%mm4, %%mm4      \n\t"\
    "packuswb  %%mm5, %%mm5      \n\t"\
    DIFF_GT_MMX(%%mm1, %%mm2, %%mm4, %%mm7, %%mm6) /* |p0-q0| > alpha-1 */\
    DIFF_GT_MMX(%%mm0, %%mm1, %%mm5, %%mm4, %%mm6) /* |p1-p0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    DIFF_GT_MMX(%%mm3, %%mm2, %%mm5, %%mm4, %%mm6) /* |q1-q0| > beta-1 */\
    "por       %%mm4, %%mm7      \n\t"\
    "pxor      %%mm6, %%mm6      \n\t"\
    "pcmpeqb   %%mm6, %%mm7      \n\t"

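/* mm7 ends up 0xff exactly where |p0-q0| < alpha && |p1-p0| < beta &&
 * |q1-q0| < beta, i.e. where the edge should be filtered.
 * (Explanatory note.) */
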
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask)
// out: mm1=p0' mm2=q0'
// clobbers: mm0,3-6
#define H264_DEBLOCK_P0_Q0(pb_01, pb_3f)\
        "movq    %%mm1              , %%mm5 \n\t"\
        "pxor    %%mm2              , %%mm5 \n\t" /* p0^q0*/\
        "pand    "#pb_01"           , %%mm5 \n\t" /* (p0^q0)&1*/\
        "pcmpeqb %%mm4              , %%mm4 \n\t"\
        "pxor    %%mm4              , %%mm3 \n\t"\
        "pavgb   %%mm0              , %%mm3 \n\t" /* (p1 - q1 + 256)>>1*/\
        "pavgb   "MANGLE(ff_pb_3)"  , %%mm3 \n\t" /*(((p1 - q1 + 256)>>1)+4)>>1 = 64+2+(p1-q1)>>2*/\
        "pxor    %%mm1              , %%mm4 \n\t"\
        "pavgb   %%mm2              , %%mm4 \n\t" /* (q0 - p0 + 256)>>1*/\
        "pavgb   %%mm5              , %%mm3 \n\t"\
        "paddusb %%mm4              , %%mm3 \n\t" /* d+128+33*/\
        "movq    "MANGLE(ff_pb_A1)" , %%mm6 \n\t"\
        "psubusb %%mm3              , %%mm6 \n\t"\
        "psubusb "MANGLE(ff_pb_A1)" , %%mm3 \n\t"\
        "pminub  %%mm7              , %%mm6 \n\t"\
        "pminub  %%mm7              , %%mm3 \n\t"\
        "psubusb %%mm6              , %%mm1 \n\t"\
        "psubusb %%mm3              , %%mm2 \n\t"\
        "paddusb %%mm3              , %%mm1 \n\t"\
        "paddusb %%mm6              , %%mm2 \n\t"

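/* Scalar equivalent per the H.264 spec (explanatory sketch):
 *     d   = av_clip( (((q0-p0)<<2) + (p1-q1) + 4) >> 3, -tc, tc );
 *     p0' = av_clip_uint8(p0 + d);
 *     q0' = av_clip_uint8(q0 - d);
 * mm3 ends up as min(max(d,0),tc) and mm6 as min(max(-d,0),tc), so the
 * paddusb/psubusb pairs apply +d / -d with clamping. */
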
// in: mm0=p1 mm1=p0 mm2=q0 mm3=q1 mm7=(tc&mask) %8=mm_bone
// out: (q1addr) = av_clip( (q2+((p0+q0+1)>>1))>>1, q1-tc0, q1+tc0 )
// clobbers: q2, tmp, tc0
#define H264_DEBLOCK_Q1(p1, q2, q2addr, q1addr, tc0, tmp)\
        "movq     %%mm1,  "#tmp"   \n\t"\
        "pavgb    %%mm2,  "#tmp"   \n\t"\
        "pavgb    "#tmp", "#q2"    \n\t" /* avg(p2,avg(p0,q0)) */\
        "pxor   "q2addr", "#tmp"   \n\t"\
        "pand     %8,     "#tmp"   \n\t" /* (p2^avg(p0,q0))&1 */\
        "psubusb  "#tmp", "#q2"    \n\t" /* (p2+((p0+q0+1)>>1))>>1 */\
        "movq     "#p1",  "#tmp"   \n\t"\
        "psubusb  "#tc0", "#tmp"   \n\t"\
        "paddusb  "#p1",  "#tc0"   \n\t"\
        "pmaxub   "#tmp", "#q2"    \n\t"\
        "pminub   "#tc0", "#q2"    \n\t"\
        "movq     "#q2",  "q1addr" \n\t"

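/* pavgb rounds up, so subtracting (q2^avg(p0,q0))&1 turns the second
 * average into a truncating one, matching (q2 + ((p0+q0+1)>>1)) >> 1;
 * pmaxub/pminub then clip against q1-tc0 / q1+tc0. (Explanatory note.) */
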
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    DECLARE_ALIGNED_8(uint64_t, tmp0[2]);

    asm volatile(
        "movq    (%1,%3), %%mm0    \n\t" //p1
        "movq    (%1,%3,2), %%mm1  \n\t" //p0
        "movq    (%2),    %%mm2    \n\t" //q0
        "movq    (%2,%3), %%mm3    \n\t" //q1
        H264_DEBLOCK_MASK(%6, %7)

        "movd      %5,    %%mm4    \n\t"
        "punpcklbw %%mm4, %%mm4    \n\t"
        "punpcklwd %%mm4, %%mm4    \n\t"
        "pcmpeqb   %%mm3, %%mm3    \n\t"
        "movq      %%mm4, %%mm6    \n\t"
        "pcmpgtb   %%mm3, %%mm4    \n\t"
        "movq      %%mm6, 8+%0     \n\t"
        "pand      %%mm4, %%mm7    \n\t"
        "movq      %%mm7, %0       \n\t"

        /* filter p1 */
        "movq     (%1),   %%mm3    \n\t" //p2
        DIFF_GT2_MMX(%%mm1, %%mm3, %%mm5, %%mm6, %%mm4) // |p2-p0|>beta-1
        "pand     %%mm7,  %%mm6    \n\t" // mask & |p2-p0|<beta
        "pand     8+%0,   %%mm7    \n\t" // mask & tc0
        "movq     %%mm7,  %%mm4    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "pand     %%mm4,  %%mm6    \n\t" // mask & |p2-p0|<beta & tc0
        H264_DEBLOCK_Q1(%%mm0, %%mm3, "(%1)", "(%1,%3)", %%mm6, %%mm4)

        /* filter q1 */
        "movq    (%2,%3,2), %%mm4  \n\t" //q2
        DIFF_GT2_MMX(%%mm2, %%mm4, %%mm5, %%mm6, %%mm3) // |q2-q0|>beta-1
        "pand     %0,     %%mm6    \n\t"
        "movq     8+%0,   %%mm5    \n\t" // can be merged with the and below but is slower than
        "pand     %%mm6,  %%mm5    \n\t"
        "psubb    %%mm6,  %%mm7    \n\t"
        "movq    (%2,%3), %%mm3    \n\t"
        H264_DEBLOCK_Q1(%%mm3, %%mm4, "(%2,%3,2)", "(%2,%3)", %%mm5, %%mm6)

        /* filter p0, q0 */
        H264_DEBLOCK_P0_Q0(%8, unused)
        "movq      %%mm1, (%1,%3,2) \n\t"
        "movq      %%mm2, (%2)      \n\t"

        : "=m"(*tmp0)
        : "r"(pix-3*stride), "r"(pix), "r"((long)stride),
          "m"(*tmp0/*unused*/), "m"(*(uint32_t*)tc0), "m"(alpha1), "m"(beta1),
          "m"(mm_bone)
    );
}

static void h264_v_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    if((tc0[0] & tc0[1]) >= 0)
        h264_loop_filter_luma_mmx2(pix, stride, alpha-1, beta-1, tc0);
    if((tc0[2] & tc0[3]) >= 0)
        h264_loop_filter_luma_mmx2(pix+8, stride, alpha-1, beta-1, tc0+2);
}
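/* tc0[i] is -1 where filtering is disabled; the sign bit of (a & b) is set
 * only when both entries are negative, so each 8-pixel half above is
 * skipped only when both of its tc0 entries are -1. (Explanatory note.) */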
static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    // also, it only needs to transpose 6x8
    DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
    int i;
    for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
        if((tc0[0] & tc0[1]) < 0)
            continue;
        transpose4x4(trans,       pix-4,          8, stride);
        transpose4x4(trans  +4*8, pix,            8, stride);
        transpose4x4(trans+4,     pix-4+4*stride, 8, stride);
        transpose4x4(trans+4+4*8, pix  +4*stride, 8, stride);
        h264_loop_filter_luma_mmx2(trans+4*8, 8, alpha-1, beta-1, tc0);
        transpose4x4(pix-2,          trans  +2*8, stride, 8);
        transpose4x4(pix-2+4*stride, trans+4+2*8, stride, 8);
    }
}

static inline void h264_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
    asm volatile(
        "movq    (%0),    %%mm0     \n\t" //p1
        "movq    (%0,%2), %%mm1     \n\t" //p0
        "movq    (%1),    %%mm2     \n\t" //q0
        "movq    (%1,%2), %%mm3     \n\t" //q1
        H264_DEBLOCK_MASK(%4, %5)
        "movd      %3,    %%mm6     \n\t"
        "punpcklbw %%mm6, %%mm6     \n\t"
        "pand      %%mm6, %%mm7     \n\t" // mm7 = tc&mask
        H264_DEBLOCK_P0_Q0(%6, %7)
        "movq      %%mm1, (%0,%2)   \n\t"
        "movq      %%mm2, (%1)      \n\t"

        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "r"(*(uint32_t*)tc0),
           "m"(alpha1), "m"(beta1), "m"(mm_bone), "m"(ff_pb_3F)
    );
}

static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    h264_loop_filter_chroma_mmx2(pix, stride, alpha-1, beta-1, tc0);
}

static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

// p0 = (p0 + q1 + 2*p1 + 2) >> 2
#define H264_FILTER_CHROMA4(p0, p1, q1, one) \
    "movq    "#p0", %%mm4  \n\t"\
    "pxor    "#q1", %%mm4  \n\t"\
    "pand   "#one", %%mm4  \n\t" /* mm4 = (p0^q1)&1 */\
    "pavgb   "#q1", "#p0"  \n\t"\
    "psubusb %%mm4, "#p0"  \n\t"\
    "pavgb   "#p1", "#p0"  \n\t" /* dst = avg(p1, avg(p0,q1) - ((p0^q1)&1)) */

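/* pavgb computes (a+b+1)>>1; subtracting (p0^q1)&1 first gives the
 * truncating average, so the final pavgb with p1 yields exactly
 * (p0 + q1 + 2*p1 + 2) >> 2. (Explanatory note.) */
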
static inline void h264_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha1, int beta1)
{
    asm volatile(
        "movq    (%0),    %%mm0     \n\t"
        "movq    (%0,%2), %%mm1     \n\t"
        "movq    (%1),    %%mm2     \n\t"
        "movq    (%1,%2), %%mm3     \n\t"
        H264_DEBLOCK_MASK(%3, %4)
        "movq    %%mm1,   %%mm5     \n\t"
        "movq    %%mm2,   %%mm6     \n\t"
        H264_FILTER_CHROMA4(%%mm1, %%mm0, %%mm3, %5) //p0'
        H264_FILTER_CHROMA4(%%mm2, %%mm3, %%mm0, %5) //q0'
        "psubb   %%mm5,   %%mm1     \n\t"
        "psubb   %%mm6,   %%mm2     \n\t"
        "pand    %%mm7,   %%mm1     \n\t"
        "pand    %%mm7,   %%mm2     \n\t"
        "paddb   %%mm5,   %%mm1     \n\t"
        "paddb   %%mm6,   %%mm2     \n\t"
        "movq    %%mm1,   (%0,%2)   \n\t"
        "movq    %%mm2,   (%1)      \n\t"
        :: "r"(pix-2*stride), "r"(pix), "r"((long)stride),
           "m"(alpha1), "m"(beta1), "m"(mm_bone)
    );
}

static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    h264_loop_filter_chroma_intra_mmx2(pix, stride, alpha-1, beta-1);
}

static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
    //FIXME: could cut some load/stores by merging transpose with filter
    DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
    transpose4x4(trans, pix-2, 8, stride);
    transpose4x4(trans+4, pix-2+4*stride, 8, stride);
    h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
    transpose4x4(pix-2, trans, stride, 8);
    transpose4x4(pix-2+4*stride, trans+4, stride, 8);
}

static void h264_loop_filter_strength_mmx2( int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                            int bidir, int edges, int step, int mask_mv0, int mask_mv1 ) {
    int dir;
    asm volatile(
        "pxor %%mm7, %%mm7 \n\t"
        "movq %0, %%mm6 \n\t"
        "movq %1, %%mm5 \n\t"
        "movq %2, %%mm4 \n\t"
        ::"m"(ff_pb_1), "m"(ff_pb_3), "m"(ff_pb_7)
    );
    // could do a special case for dir==0 && edges==1, but it only reduces the
    // average filter time by 1.2%
    for( dir=1; dir>=0; dir-- ) {
        const int d_idx = dir ? -8 : -1;
        const int mask_mv = dir ? mask_mv1 : mask_mv0;
        DECLARE_ALIGNED_8(const uint64_t, mask_dir) = dir ? 0 : 0xffffffffffffffffULL;
        int b_idx, edge, l;
        for( b_idx=12, edge=0; edge<edges; edge+=step, b_idx+=8*step ) {
            asm volatile(
                "pand %0, %%mm0 \n\t"
                ::"m"(mask_dir)
            );
            if(!(mask_mv & edge)) {
                asm volatile("pxor %%mm0, %%mm0 \n\t":);
                for( l = bidir; l >= 0; l-- ) {
                    asm volatile(
                        "movd %0, %%mm1 \n\t"
                        "punpckldq %1, %%mm1 \n\t"
                        "movq %%mm1, %%mm2 \n\t"
                        "psrlw $7, %%mm2 \n\t"
                        "pand %%mm6, %%mm2 \n\t"
                        "por %%mm2, %%mm1 \n\t" // ref_cache with -2 mapped to -1
                        "punpckldq %%mm1, %%mm2 \n\t"
                        "pcmpeqb %%mm2, %%mm1 \n\t"
                        "paddb %%mm6, %%mm1 \n\t"
                        "punpckhbw %%mm7, %%mm1 \n\t" // ref[b] != ref[bn]
                        "por %%mm1, %%mm0 \n\t"

                        "movq %2, %%mm1 \n\t"
                        "movq %3, %%mm2 \n\t"
                        "psubw %4, %%mm1 \n\t"
                        "psubw %5, %%mm2 \n\t"
                        "packsswb %%mm2, %%mm1 \n\t"
                        "paddb %%mm5, %%mm1 \n\t"
                        "pminub %%mm4, %%mm1 \n\t"
                        "pcmpeqb %%mm4, %%mm1 \n\t" // abs(mv[b] - mv[bn]) >= limit
                        "por %%mm1, %%mm0 \n\t"
                        ::"m"(ref[l][b_idx]),
                          "m"(ref[l][b_idx+d_idx]),
                          "m"(mv[l][b_idx][0]),
                          "m"(mv[l][b_idx+2][0]),
                          "m"(mv[l][b_idx+d_idx][0]),
                          "m"(mv[l][b_idx+d_idx+2][0])
                    );
                }
            }
            asm volatile(
                "movd %0, %%mm1 \n\t"
                "por  %1, %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pcmpgtw %%mm7, %%mm1 \n\t" // nnz[b] || nnz[bn]
                ::"m"(nnz[b_idx]),
                  "m"(nnz[b_idx+d_idx])
            );
            asm volatile(
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "pcmpeqw %%mm7, %%mm0 \n\t"
                "psrlw $15, %%mm0 \n\t" // nonzero -> 1
                "psrlw $14, %%mm1 \n\t"
                "movq %%mm0, %%mm2 \n\t"
                "por %%mm1, %%mm2 \n\t"
                "psrlw $1, %%mm1 \n\t"
                "pandn %%mm2, %%mm1 \n\t"
                "movq %%mm1, %0 \n\t"
                :"=m"(*bS[dir][edge])
                ::"memory"
            );
        }
        edges = 4;
        step = 1;
    }
    asm volatile(
        "movq   (%0), %%mm0 \n\t"
        "movq  8(%0), %%mm1 \n\t"
        "movq 16(%0), %%mm2 \n\t"
        "movq 24(%0), %%mm3 \n\t"
        TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4)
        "movq %%mm0,   (%0) \n\t"
        "movq %%mm3,  8(%0) \n\t"
        "movq %%mm4, 16(%0) \n\t"
        "movq %%mm2, 24(%0) \n\t"
        ::"r"(bS[0])
        :"memory"
    );
}
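/* In the strength function above, the final psrlw/pandn merge yields, per
 * 4-pixel edge group, bS = 2 where either side has nonzero coefficients
 * (nnz), else 1 where refs differ or an mv component differs by >= 4
 * quarter-pel units, else 0. bS[0] is transposed at the end because the
 * dir==0 edges were accumulated column-wise. (Explanatory note.) */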

/***********************************/
/* motion compensation */

#define QPEL_H264V(A,B,C,D,E,F,OP)\
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "psllw $2, %%mm6            \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "pmullw %4, %%mm6           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "paddw %5, "#A"             \n\t"\
        "paddw "#F", "#A"           \n\t"\
        "paddw "#A", %%mm6          \n\t"\
        "psraw $5, %%mm6            \n\t"\
        "packuswb %%mm6, %%mm6      \n\t"\
        OP(%%mm6, (%1), A, d)\
        "add %3, %1                 \n\t"

#define QPEL_H264HV(A,B,C,D,E,F,OF)\
        "movd (%0), "#F"            \n\t"\
        "movq "#C", %%mm6           \n\t"\
        "paddw "#D", %%mm6          \n\t"\
        "psllw $2, %%mm6            \n\t"\
        "psubw "#B", %%mm6          \n\t"\
        "psubw "#E", %%mm6          \n\t"\
        "pmullw %3, %%mm6           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, "#F"      \n\t"\
        "paddw "#F", "#A"           \n\t"\
        "paddw "#A", %%mm6          \n\t"\
        "movq %%mm6, "#OF"(%1)      \n\t"

#define QPEL_H264(OPNAME, OP, MMX)\
681 00e210dd Michael Niedermayer
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
682 d2bb7db1 Loren Merritt
    int h=4;\
683
\
684
    asm volatile(\
685 bb270c08 Diego Biurrun
        "pxor %%mm7, %%mm7          \n\t"\
686
        "movq %5, %%mm4             \n\t"\
687
        "movq %6, %%mm5             \n\t"\
688
        "1:                         \n\t"\
689
        "movd  -1(%0), %%mm1        \n\t"\
690
        "movd    (%0), %%mm2        \n\t"\
691
        "movd   1(%0), %%mm3        \n\t"\
692
        "movd   2(%0), %%mm0        \n\t"\
693
        "punpcklbw %%mm7, %%mm1     \n\t"\
694
        "punpcklbw %%mm7, %%mm2     \n\t"\
695
        "punpcklbw %%mm7, %%mm3     \n\t"\
696
        "punpcklbw %%mm7, %%mm0     \n\t"\
697
        "paddw %%mm0, %%mm1         \n\t"\
698
        "paddw %%mm3, %%mm2         \n\t"\
699
        "movd  -2(%0), %%mm0        \n\t"\
700
        "movd   3(%0), %%mm3        \n\t"\
701
        "punpcklbw %%mm7, %%mm0     \n\t"\
702
        "punpcklbw %%mm7, %%mm3     \n\t"\
703
        "paddw %%mm3, %%mm0         \n\t"\
704
        "psllw $2, %%mm2            \n\t"\
705
        "psubw %%mm1, %%mm2         \n\t"\
706
        "pmullw %%mm4, %%mm2        \n\t"\
707
        "paddw %%mm5, %%mm0         \n\t"\
708
        "paddw %%mm2, %%mm0         \n\t"\
709
        "psraw $5, %%mm0            \n\t"\
710
        "packuswb %%mm0, %%mm0      \n\t"\
711 d2bb7db1 Loren Merritt
        OP(%%mm0, (%1),%%mm6, d)\
712 bb270c08 Diego Biurrun
        "add %3, %0                 \n\t"\
713
        "add %4, %1                 \n\t"\
714
        "decl %2                    \n\t"\
715
        " jnz 1b                    \n\t"\
716 d2bb7db1 Loren Merritt
        : "+a"(src), "+c"(dst), "+m"(h)\
717
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
718
        : "memory"\
719
    );\
720
}\
721 00e210dd Michael Niedermayer
static av_noinline void OPNAME ## h264_qpel4_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
722 6a8eb0f4 Loren Merritt
    int h=4;\
723
    asm volatile(\
724
        "pxor %%mm7, %%mm7          \n\t"\
725 d84f7c61 Loren Merritt
        "movq %0, %%mm4             \n\t"\
726
        "movq %1, %%mm5             \n\t"\
727
        :: "m"(ff_pw_5), "m"(ff_pw_16)\
728
    );\
729
    do{\
730
    asm volatile(\
731 6a8eb0f4 Loren Merritt
        "movd  -1(%0), %%mm1        \n\t"\
732
        "movd    (%0), %%mm2        \n\t"\
733
        "movd   1(%0), %%mm3        \n\t"\
734
        "movd   2(%0), %%mm0        \n\t"\
735
        "punpcklbw %%mm7, %%mm1     \n\t"\
736
        "punpcklbw %%mm7, %%mm2     \n\t"\
737
        "punpcklbw %%mm7, %%mm3     \n\t"\
738
        "punpcklbw %%mm7, %%mm0     \n\t"\
739
        "paddw %%mm0, %%mm1         \n\t"\
740
        "paddw %%mm3, %%mm2         \n\t"\
741
        "movd  -2(%0), %%mm0        \n\t"\
742
        "movd   3(%0), %%mm3        \n\t"\
743
        "punpcklbw %%mm7, %%mm0     \n\t"\
744
        "punpcklbw %%mm7, %%mm3     \n\t"\
745
        "paddw %%mm3, %%mm0         \n\t"\
746
        "psllw $2, %%mm2            \n\t"\
747
        "psubw %%mm1, %%mm2         \n\t"\
748
        "pmullw %%mm4, %%mm2        \n\t"\
749
        "paddw %%mm5, %%mm0         \n\t"\
750
        "paddw %%mm2, %%mm0         \n\t"\
751
        "movd   (%2), %%mm3         \n\t"\
752
        "psraw $5, %%mm0            \n\t"\
753
        "packuswb %%mm0, %%mm0      \n\t"\
754
        PAVGB" %%mm3, %%mm0         \n\t"\
755
        OP(%%mm0, (%1),%%mm6, d)\
756 d84f7c61 Loren Merritt
        "add %4, %0                 \n\t"\
757
        "add %4, %1                 \n\t"\
758
        "add %3, %2                 \n\t"\
759
        : "+a"(src), "+c"(dst), "+d"(src2)\
760
        : "D"((long)src2Stride), "S"((long)dstStride)\
761 6a8eb0f4 Loren Merritt
        : "memory"\
762
    );\
763 d84f7c61 Loren Merritt
    }while(--h);\
764 6a8eb0f4 Loren Merritt
}\
765 00e210dd Michael Niedermayer
static av_noinline void OPNAME ## h264_qpel4_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
766 d2bb7db1 Loren Merritt
    src -= 2*srcStride;\
767
    asm volatile(\
768 bb270c08 Diego Biurrun
        "pxor %%mm7, %%mm7          \n\t"\
769
        "movd (%0), %%mm0           \n\t"\
770
        "add %2, %0                 \n\t"\
771
        "movd (%0), %%mm1           \n\t"\
772
        "add %2, %0                 \n\t"\
773
        "movd (%0), %%mm2           \n\t"\
774
        "add %2, %0                 \n\t"\
775
        "movd (%0), %%mm3           \n\t"\
776
        "add %2, %0                 \n\t"\
777
        "movd (%0), %%mm4           \n\t"\
778
        "add %2, %0                 \n\t"\
779
        "punpcklbw %%mm7, %%mm0     \n\t"\
780
        "punpcklbw %%mm7, %%mm1     \n\t"\
781
        "punpcklbw %%mm7, %%mm2     \n\t"\
782
        "punpcklbw %%mm7, %%mm3     \n\t"\
783
        "punpcklbw %%mm7, %%mm4     \n\t"\
784 d2bb7db1 Loren Merritt
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
785
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
786
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
787
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
788
         \
789
        : "+a"(src), "+c"(dst)\
790
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
791
        : "memory"\
792
    );\
793
}\
794 00e210dd Michael Niedermayer
static av_noinline void OPNAME ## h264_qpel4_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
795 d2bb7db1 Loren Merritt
    int h=4;\
796
    int w=3;\
797
    src -= 2*srcStride+2;\
798
    while(w--){\
799
        asm volatile(\
800 bb270c08 Diego Biurrun
            "pxor %%mm7, %%mm7      \n\t"\
801
            "movd (%0), %%mm0       \n\t"\
802
            "add %2, %0             \n\t"\
803
            "movd (%0), %%mm1       \n\t"\
804
            "add %2, %0             \n\t"\
805
            "movd (%0), %%mm2       \n\t"\
806
            "add %2, %0             \n\t"\
807
            "movd (%0), %%mm3       \n\t"\
808
            "add %2, %0             \n\t"\
809
            "movd (%0), %%mm4       \n\t"\
810
            "add %2, %0             \n\t"\
811
            "punpcklbw %%mm7, %%mm0 \n\t"\
812
            "punpcklbw %%mm7, %%mm1 \n\t"\
813
            "punpcklbw %%mm7, %%mm2 \n\t"\
814
            "punpcklbw %%mm7, %%mm3 \n\t"\
815
            "punpcklbw %%mm7, %%mm4 \n\t"\
816 d2bb7db1 Loren Merritt
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*8*3)\
817
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*8*3)\
818
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*8*3)\
819
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*8*3)\
820
             \
821
            : "+a"(src)\
822
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
823
            : "memory"\
824
        );\
825
        tmp += 4;\
826
        src += 4 - 9*srcStride;\
827
    }\
828
    tmp -= 3*4;\
829
    asm volatile(\
830 bb270c08 Diego Biurrun
        "movq %4, %%mm6             \n\t"\
831
        "1:                         \n\t"\
832
        "movq     (%0), %%mm0       \n\t"\
833
        "paddw  10(%0), %%mm0       \n\t"\
834
        "movq    2(%0), %%mm1       \n\t"\
835
        "paddw   8(%0), %%mm1       \n\t"\
836
        "movq    4(%0), %%mm2       \n\t"\
837
        "paddw   6(%0), %%mm2       \n\t"\
838
        "psubw %%mm1, %%mm0         \n\t"/*a-b   (abccba)*/\
839
        "psraw $2, %%mm0            \n\t"/*(a-b)/4 */\
840
        "psubw %%mm1, %%mm0         \n\t"/*(a-b)/4-b */\
841
        "paddsw %%mm2, %%mm0        \n\t"\
842 cec93959 Loren Merritt
        "psraw $2, %%mm0            \n\t"/*((a-b)/4-b+c)/4 */\
843 bb270c08 Diego Biurrun
        "paddw %%mm6, %%mm2         \n\t"\
844 cec93959 Loren Merritt
        "paddw %%mm2, %%mm0         \n\t"/*(a-5*b+20*c)/16 +32 */\
845 bb270c08 Diego Biurrun
        "psraw $6, %%mm0            \n\t"\
846
        "packuswb %%mm0, %%mm0      \n\t"\
847 d2bb7db1 Loren Merritt
        OP(%%mm0, (%1),%%mm7, d)\
848 bb270c08 Diego Biurrun
        "add $24, %0                \n\t"\
849
        "add %3, %1                 \n\t"\
850
        "decl %2                    \n\t"\
851
        " jnz 1b                    \n\t"\
852 d2bb7db1 Loren Merritt
        : "+a"(tmp), "+c"(dst), "+m"(h)\
853
        : "S"((long)dstStride), "m"(ff_pw_32)\
854
        : "memory"\
855
    );\
856
}\
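    /* Second pass above: with a,b,c the symmetric tap-pair sums (abccba), \
       ((((a-b)>>2)-b+c)>>2)+c+32 then >>6 approximates                    \
       (a - 5*b + 20*c + 512) >> 10 while keeping every intermediate       \
       within 16 bits. (Explanatory note.) */\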
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %5, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq %6, %%mm5             \n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %3, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(src), "+c"(dst), "+m"(h)\
        : "d"((long)srcStride), "S"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
    );\
}\
\
static av_noinline void OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    int h=8;\
    asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movq %0, %%mm6             \n\t"\
        :: "m"(ff_pw_5)\
    );\
    do{\
    asm volatile(\
        "movq    (%0), %%mm0        \n\t"\
        "movq   1(%0), %%mm2        \n\t"\
        "movq %%mm0, %%mm1          \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpckhbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm3, %%mm1         \n\t"\
        "psllw $2, %%mm0            \n\t"\
        "psllw $2, %%mm1            \n\t"\
        "movq   -1(%0), %%mm2       \n\t"\
        "movq    2(%0), %%mm4       \n\t"\
        "movq %%mm2, %%mm3          \n\t"\
        "movq %%mm4, %%mm5          \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpckhbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        "punpckhbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm4, %%mm2         \n\t"\
        "paddw %%mm3, %%mm5         \n\t"\
        "psubw %%mm2, %%mm0         \n\t"\
        "psubw %%mm5, %%mm1         \n\t"\
        "pmullw %%mm6, %%mm0        \n\t"\
        "pmullw %%mm6, %%mm1        \n\t"\
        "movd   -2(%0), %%mm2       \n\t"\
        "movd    7(%0), %%mm5       \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm5     \n\t"\
        "paddw %%mm3, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "movq %5, %%mm5             \n\t"\
        "paddw %%mm5, %%mm2         \n\t"\
        "paddw %%mm5, %%mm4         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm4, %%mm1         \n\t"\
        "psraw $5, %%mm0            \n\t"\
        "psraw $5, %%mm1            \n\t"\
        "movq (%2), %%mm4           \n\t"\
        "packuswb %%mm1, %%mm0      \n\t"\
        PAVGB" %%mm4, %%mm0         \n\t"\
        OP(%%mm0, (%1),%%mm5, q)\
        "add %4, %0                 \n\t"\
        "add %4, %1                 \n\t"\
        "add %3, %2                 \n\t"\
        : "+a"(src), "+c"(dst), "+d"(src2)\
        : "D"((long)src2Stride), "S"((long)dstStride),\
          "m"(ff_pw_16)\
        : "memory"\
    );\
    }while(--h);\
}\
\
static av_noinline void OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int w= 2;\
    src -= 2*srcStride;\
    \
    while(w--){\
      asm volatile(\
        "pxor %%mm7, %%mm7          \n\t"\
        "movd (%0), %%mm0           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm1           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm2           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm3           \n\t"\
        "add %2, %0                 \n\t"\
        "movd (%0), %%mm4           \n\t"\
        "add %2, %0                 \n\t"\
        "punpcklbw %%mm7, %%mm0     \n\t"\
        "punpcklbw %%mm7, %%mm1     \n\t"\
        "punpcklbw %%mm7, %%mm2     \n\t"\
        "punpcklbw %%mm7, %%mm3     \n\t"\
        "punpcklbw %%mm7, %%mm4     \n\t"\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
        QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
        QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
        QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
        QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
        QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
        QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
         \
        : "+a"(src), "+c"(dst)\
        : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
        : "memory"\
     );\
     if(h==16){\
        asm volatile(\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            QPEL_H264V(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, OP)\
            QPEL_H264V(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, OP)\
            QPEL_H264V(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, OP)\
            QPEL_H264V(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, OP)\
            QPEL_H264V(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, OP)\
            QPEL_H264V(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, OP)\
            \
           : "+a"(src), "+c"(dst)\
           : "S"((long)srcStride), "D"((long)dstStride), "m"(ff_pw_5), "m"(ff_pw_16)\
           : "memory"\
        );\
     }\
     src += 4-(h+5)*srcStride;\
     dst += 4-h*dstStride;\
   }\
}\
static av_noinline void OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride, int size){\
    int h = size;\
    int w = (size+8)>>2;\
    src -= 2*srcStride+2;\
    while(w--){\
        asm volatile(\
            "pxor %%mm7, %%mm7      \n\t"\
            "movd (%0), %%mm0       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm1       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm2       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm3       \n\t"\
            "add %2, %0             \n\t"\
            "movd (%0), %%mm4       \n\t"\
            "add %2, %0             \n\t"\
            "punpcklbw %%mm7, %%mm0 \n\t"\
            "punpcklbw %%mm7, %%mm1 \n\t"\
            "punpcklbw %%mm7, %%mm2 \n\t"\
            "punpcklbw %%mm7, %%mm3 \n\t"\
            "punpcklbw %%mm7, %%mm4 \n\t"\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 0*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 1*48)\
            QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 2*48)\
            QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 3*48)\
            QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 4*48)\
            QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 5*48)\
            QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 6*48)\
            QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 7*48)\
            : "+a"(src)\
            : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
            : "memory"\
        );\
        if(size==16){\
            asm volatile(\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1,  8*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2,  9*48)\
                QPEL_H264HV(%%mm4, %%mm5, %%mm0, %%mm1, %%mm2, %%mm3, 10*48)\
                QPEL_H264HV(%%mm5, %%mm0, %%mm1, %%mm2, %%mm3, %%mm4, 11*48)\
                QPEL_H264HV(%%mm0, %%mm1, %%mm2, %%mm3, %%mm4, %%mm5, 12*48)\
                QPEL_H264HV(%%mm1, %%mm2, %%mm3, %%mm4, %%mm5, %%mm0, 13*48)\
                QPEL_H264HV(%%mm2, %%mm3, %%mm4, %%mm5, %%mm0, %%mm1, 14*48)\
                QPEL_H264HV(%%mm3, %%mm4, %%mm5, %%mm0, %%mm1, %%mm2, 15*48)\
                : "+a"(src)\
                : "c"(tmp), "S"((long)srcStride), "m"(ff_pw_5)\
                : "memory"\
            );\
        }\
        tmp += 4;\
        src += 4 - (size+5)*srcStride;\
    }\
    tmp -= size+8;\
    w = size>>4;\
    do{\
    h = size;\
    asm volatile(\
        "movq %4, %%mm6             \n\t"\
        "1:                         \n\t"\
        "movq     (%0), %%mm0       \n\t"\
        "movq    8(%0), %%mm3       \n\t"\
        "movq    2(%0), %%mm1       \n\t"\
        "movq   10(%0), %%mm4       \n\t"\
        "paddw   %%mm4, %%mm0       \n\t"\
        "paddw   %%mm3, %%mm1       \n\t"\
        "paddw  18(%0), %%mm3       \n\t"\
        "paddw  16(%0), %%mm4       \n\t"\
        "movq    4(%0), %%mm2       \n\t"\
        "movq   12(%0), %%mm5       \n\t"\
        "paddw   6(%0), %%mm2       \n\t"\
        "paddw  14(%0), %%mm5       \n\t"\
        "psubw %%mm1, %%mm0         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "psraw $2, %%mm0            \n\t"\
        "psraw $2, %%mm3            \n\t"\
        "psubw %%mm1, %%mm0         \n\t"\
        "psubw %%mm4, %%mm3         \n\t"\
        "paddsw %%mm2, %%mm0        \n\t"\
        "paddsw %%mm5, %%mm3        \n\t"\
        "psraw $2, %%mm0            \n\t"\
        "psraw $2, %%mm3            \n\t"\
        "paddw %%mm6, %%mm2         \n\t"\
        "paddw %%mm6, %%mm5         \n\t"\
        "paddw %%mm2, %%mm0         \n\t"\
        "paddw %%mm5, %%mm3         \n\t"\
        "psraw $6, %%mm0            \n\t"\
        "psraw $6, %%mm3            \n\t"\
        "packuswb %%mm3, %%mm0      \n\t"\
        OP(%%mm0, (%1),%%mm7, q)\
        "add $48, %0                \n\t"\
        "add %3, %1                 \n\t"\
        "decl %2                    \n\t"\
        " jnz 1b                    \n\t"\
        : "+a"(tmp), "+c"(dst), "+m"(h)\
        : "S"((long)dstStride), "m"(ff_pw_32)\
        : "memory"\
    );\
    tmp += 8 - size*24;\
    dst += 8 - size*dstStride;\
    }while(w--);\
}\
\
static void OPNAME ## h264_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 8);\
}\
static av_noinline void OPNAME ## h264_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst  , src  , dstStride, srcStride, 16);\
    OPNAME ## h264_qpel8or16_v_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
    src += 8*srcStride;\
    dst += 8*dstStride;\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst  , src  , dstStride, srcStride);\
    OPNAME ## h264_qpel8_h_lowpass_ ## MMX(dst+8, src+8, dstStride, srcStride);\
}\
\
static av_noinline void OPNAME ## h264_qpel16_h_lowpass_l2_ ## MMX(uint8_t *dst, uint8_t *src, uint8_t *src2, int dstStride, int src2Stride){\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
    src += 8*dstStride;\
    dst += 8*dstStride;\
    src2 += 8*src2Stride;\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst  , src  , src2  , dstStride, src2Stride);\
    OPNAME ## h264_qpel8_h_lowpass_l2_ ## MMX(dst+8, src+8, src2+8, dstStride, src2Stride);\
}\
\
static void OPNAME ## h264_qpel8_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 8);\
}\
\
static void OPNAME ## h264_qpel16_hv_lowpass_ ## MMX(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
    OPNAME ## h264_qpel8or16_hv_lowpass_ ## MMX(dst  , tmp  , src  , dstStride, tmpStride, srcStride, 16);\
}\
\
static av_noinline void OPNAME ## pixels4_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq       %5,  %%mm6          \n\t"\
        "movq      (%1), %%mm0          \n\t"\
        "movq    24(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm0, %%mm0          \n\t"\
        "packuswb %%mm1, %%mm1          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        PAVGB"  (%0,%3), %%mm1          \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        "lea  (%0,%3,2), %0             \n\t"\
        "lea  (%2,%4,2), %2             \n\t"\
        "movq    48(%1), %%mm0          \n\t"\
        "movq    72(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm0, %%mm0          \n\t"\
        "packuswb %%mm1, %%mm1          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        PAVGB"  (%0,%3), %%mm1          \n\t"\
        OP(%%mm0, (%2),    %%mm4, d)\
        OP(%%mm1, (%2,%4), %%mm5, d)\
        :"+a"(src8), "+c"(src16), "+d"(dst)\
        :"S"((long)src8Stride), "D"((long)dstStride), "m"(ff_pw_16)\
        :"memory");\
}\
static av_noinline void OPNAME ## pixels8_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    asm volatile(\
        "movq       %0,  %%mm6          \n\t"\
        ::"m"(ff_pw_16)\
        );\
    while(h--){\
    asm volatile(\
        "movq      (%1), %%mm0          \n\t"\
        "movq     8(%1), %%mm1          \n\t"\
        "paddw    %%mm6, %%mm0          \n\t"\
        "paddw    %%mm6, %%mm1          \n\t"\
        "psraw      $5,  %%mm0          \n\t"\
        "psraw      $5,  %%mm1          \n\t"\
        "packuswb %%mm1, %%mm0          \n\t"\
        PAVGB"     (%0), %%mm0          \n\t"\
        OP(%%mm0, (%2), %%mm5, q)\
        ::"a"(src8), "c"(src16), "d"(dst)\
        :"memory");\
        src8 += src8Stride;\
        src16 += 24;\
        dst += dstStride;\
    }\
}\
static void OPNAME ## pixels16_l2_shift5_ ## MMX(uint8_t *dst, int16_t *src16, uint8_t *src8, int dstStride, int src8Stride, int h)\
{\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst  , src16  , src8  , dstStride, src8Stride, h);\
    OPNAME ## pixels8_l2_shift5_ ## MMX(dst+8, src16+8, src8+8, dstStride, src8Stride, h);\
}\
1233
1234 d2bb7db1 Loren Merritt
1235
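/* H264_MC instantiates the 16 quarter-pel motion compensation functions
 * for one block size.  In the mcXY suffix, X and Y are the fractional
 * parts of the motion vector in quarter-pel units: mc00 is the full-pel
 * copy, mc20/mc02 the horizontal/vertical half-pel filters, mc22 the 2D
 * half-pel, and the remaining positions are computed as the average of
 * two intermediate planes (e.g. mc10 averages the full-pel pixels with
 * the horizontal half-pel result). */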
#define H264_MC(OPNAME, SIZE, MMX) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _mmx(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, src+1, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const half= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*SIZE/8];\
    uint8_t * const halfV= (uint8_t*)temp;\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(halfV, src+1, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4];\
    int16_t * const tmp= (int16_t*)temp;\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    uint8_t * const halfHV= (uint8_t*)temp;\
    int16_t * const tmp= ((int16_t*)temp) + SIZE*SIZE/2;\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, halfHV, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+2, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[SIZE*(SIZE<8?12:24)/4 + SIZE*SIZE/8];\
    int16_t * const halfV= ((int16_t*)temp) + SIZE*SIZE/2;\
    uint8_t * const halfHV= ((uint8_t*)temp);\
    assert(((int)temp & 7) == 0);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(halfHV, halfV, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_shift5_ ## MMX(dst, halfV+3, halfHV, stride, SIZE, SIZE);\
}\


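/* OP glue for QPEL_H264: the avg_ variants reload the destination,
 * average it with the filter result (pavgusb on 3DNow!, pavgb on MMX2)
 * and store it back, while the put_ variants use PUT_OP, a plain store. */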
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

#define PAVGB "pavgusb"
QPEL_H264(put_,       PUT_OP, 3dnow)
QPEL_H264(avg_, AVG_3DNOW_OP, 3dnow)
#undef PAVGB
#define PAVGB "pavgb"
QPEL_H264(put_,       PUT_OP, mmx2)
QPEL_H264(avg_,  AVG_MMX2_OP, mmx2)
#undef PAVGB

H264_MC(put_, 4, 3dnow)
H264_MC(put_, 8, 3dnow)
H264_MC(put_, 16,3dnow)
H264_MC(avg_, 4, 3dnow)
H264_MC(avg_, 8, 3dnow)
H264_MC(avg_, 16,3dnow)
H264_MC(put_, 4, mmx2)
H264_MC(put_, 8, mmx2)
H264_MC(put_, 16,mmx2)
H264_MC(avg_, 4, mmx2)
H264_MC(avg_, 8, mmx2)
H264_MC(avg_, 16,mmx2)

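/* Chroma MC: dsputil_h264_template_mmx.c is included three times with
 * different H264_CHROMA_* macros, instantiating the put variant (no-op
 * OP), the MMX2 avg variant (pavgb) and the 3DNow! avg variant
 * (pavgusb).  The 2x2 function is only instantiated in the first two
 * blocks, and even the one generated alongside the plain-MMX put
 * functions is named _mmx2, presumably because the template's mc2 code
 * needs MMX2 instructions. */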
#define H264_CHROMA_OP(S,D)
#define H264_CHROMA_OP4(S,D,T)
#define H264_CHROMA_MC8_TMPL put_h264_chroma_mc8_mmx
#define H264_CHROMA_MC4_TMPL put_h264_chroma_mc4_mmx
#define H264_CHROMA_MC2_TMPL put_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 put_pixels8_mmx
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd  " #S ", " #T " \n\t"\
                               "pavgb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_mmx2
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_mmx2
#define H264_CHROMA_MC2_TMPL avg_h264_chroma_mc2_mmx2
#define H264_CHROMA_MC8_MV0 avg_pixels8_mmx2
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC2_TMPL
#undef H264_CHROMA_MC8_MV0

#define H264_CHROMA_OP(S,D) "pavgusb " #S ", " #D " \n\t"
#define H264_CHROMA_OP4(S,D,T) "movd " #S ", " #T " \n\t"\
                               "pavgusb " #T ", " #D " \n\t"
#define H264_CHROMA_MC8_TMPL avg_h264_chroma_mc8_3dnow
#define H264_CHROMA_MC4_TMPL avg_h264_chroma_mc4_3dnow
#define H264_CHROMA_MC8_MV0 avg_pixels8_3dnow
#include "dsputil_h264_template_mmx.c"
#undef H264_CHROMA_OP
#undef H264_CHROMA_OP4
#undef H264_CHROMA_MC8_TMPL
#undef H264_CHROMA_MC4_TMPL
#undef H264_CHROMA_MC8_MV0

/***********************************/
/* weighted prediction */

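/* Explicit unidirectional weighting per the H.264 spec:
 *     dst = clip_uint8(((dst*weight + (1 << (log2_denom-1))) >> log2_denom) + offset)
 * The MMX version below folds the offset and the rounding bias into a
 * single constant (broadcast to all four words of mm5 via pshufw), so
 * the per-pixel work is just pmullw/paddsw/psraw/packuswb.  A scalar
 * sketch of the same computation; illustrative only, this helper and
 * its name are not part of this file's API: */
static inline void weight_ref_c(uint8_t *dst, int stride, int log2_denom,
                                int weight, int offset, int w, int h)
{
    int x, y;
    /* same pre-folding as ff_h264_weight_WxH_mmx2 below */
    int bias = (offset << log2_denom) + ((1 << log2_denom) >> 1);
    for(y=0; y<h; y++){
        for(x=0; x<w; x++){
            int v = (dst[x]*weight + bias) >> log2_denom;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v; /* clip to 8 bits */
        }
        dst += stride;
    }
}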
static inline void ff_h264_weight_WxH_mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int x, y;
    offset <<= log2_denom;
    offset += (1 << log2_denom) >> 1;
    asm volatile(
        "movd    %0, %%mm4        \n\t"
        "movd    %1, %%mm5        \n\t"
        "movd    %2, %%mm6        \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weight), "g"(offset), "g"(log2_denom)
    );
    for(y=0; y<h; y+=2){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm4, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm1 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "psraw     %%mm6, %%mm1 \n\t"
                "packuswb  %%mm7, %%mm0 \n\t"
                "packuswb  %%mm7, %%mm1 \n\t"
                "movd      %%mm0, %0    \n\t"
                "movd      %%mm1, %1    \n\t"
                : "+m"(*(uint32_t*)(dst+x)),
                  "+m"(*(uint32_t*)(dst+x+stride))
            );
        }
        dst += 2*stride;
    }
}

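/* Bidirectional weighting.  The spec result is
 *     dst = clip_uint8(((dst*weightd + src*weights + (1 << log2_denom)) >> (log2_denom+1))
 *                      + ((offset_d + offset_s + 1) >> 1))
 * Assuming the caller passes offset = offset_d + offset_s, the
 * ((offset+1)|1) << log2_denom below builds an odd multiple of
 * 2^log2_denom; shifting that right by log2_denom+1 (mm6 holds
 * log2_denom+1) reproduces both the rounding bias and the averaged
 * offset in a single paddsw. */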
static inline void ff_h264_biweight_WxH_mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset, int w, int h)
{
    int x, y;
    offset = ((offset + 1) | 1) << log2_denom;
    asm volatile(
        "movd    %0, %%mm3        \n\t"
        "movd    %1, %%mm4        \n\t"
        "movd    %2, %%mm5        \n\t"
        "movd    %3, %%mm6        \n\t"
        "pshufw  $0, %%mm3, %%mm3 \n\t"
        "pshufw  $0, %%mm4, %%mm4 \n\t"
        "pshufw  $0, %%mm5, %%mm5 \n\t"
        "pxor    %%mm7, %%mm7     \n\t"
        :: "g"(weightd), "g"(weights), "g"(offset), "g"(log2_denom+1)
    );
    for(y=0; y<h; y++){
        for(x=0; x<w; x+=4){
            asm volatile(
                "movd      %0,    %%mm0 \n\t"
                "movd      %1,    %%mm1 \n\t"
                "punpcklbw %%mm7, %%mm0 \n\t"
                "punpcklbw %%mm7, %%mm1 \n\t"
                "pmullw    %%mm3, %%mm0 \n\t"
                "pmullw    %%mm4, %%mm1 \n\t"
                "paddsw    %%mm1, %%mm0 \n\t"
                "paddsw    %%mm5, %%mm0 \n\t"
                "psraw     %%mm6, %%mm0 \n\t"
                "packuswb  %%mm0, %%mm0 \n\t"
                "movd      %%mm0, %0    \n\t"
                : "+m"(*(uint32_t*)(dst+x))
                :  "m"(*(uint32_t*)(src+x))
            );
        }
        src += stride;
        dst += stride;
    }
}

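/* H264_WEIGHT stamps out one fixed-size entry point per H.264 block
 * size; since the WxH workers above are inline, each wrapper compiles
 * to a loop specialized for constant w and h. */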
#define H264_WEIGHT(W,H) \
static void ff_h264_biweight_ ## W ## x ## H ## _mmx2(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    ff_h264_biweight_WxH_mmx2(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
} \
static void ff_h264_weight_ ## W ## x ## H ## _mmx2(uint8_t *dst, int stride, int log2_denom, int weight, int offset){ \
    ff_h264_weight_WxH_mmx2(dst, stride, log2_denom, weight, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)
H264_WEIGHT( 4, 8)
H264_WEIGHT( 4, 4)
H264_WEIGHT( 4, 2)