/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to build these constants in registers
// than to access them in memory; pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
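
/* A sketch (not part of the original file) of what the PIC variants above
 * compute, step by step in one MMX register:
 *
 *     pcmpeqd  reg, reg   ; reg = 0xFFFFFFFFFFFFFFFF  (all ones)
 *     psrlw    $15, reg   ; reg = 0x0001000100010001  (each word = 1)
 *     packuswb reg, reg   ; reg = 0x0101010101010101  (= ff_bone)
 *
 * MOVQ_WTWO replaces the pack with 'psllw $1', giving 0x0002000200020002
 * (= ff_wtwo), so neither macro needs a PIC-unfriendly memory reference. */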

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm__ volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus we use "r" for the block
    // pointer here instead of "m"
    __asm__ volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}
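
/* Reference sketch (not part of the original file) of what
 * put_pixels_clamped_mmx computes: packuswb saturates each signed 16-bit
 * coefficient to the unsigned byte range, i.e.
 *
 *     void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels,
 *                                 int line_size)   // hypothetical name
 *     {
 *         int i, j;
 *         for (i = 0; i < 8; i++) {
 *             for (j = 0; j < 8; j++) {
 *                 int v = block[i*8 + j];
 *                 pixels[j] = v < 0 ? 0 : v > 255 ? 255 : v;
 *             }
 *             pixels += line_size;
 *         }
 *     }
 */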

DECLARE_ASM_CONST(8, uint8_t, ff_vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
            "movq    "#off"(%2), %%mm1          \n\t"\
            "movq 16+"#off"(%2), %%mm2          \n\t"\
            "movq 32+"#off"(%2), %%mm3          \n\t"\
            "movq 48+"#off"(%2), %%mm4          \n\t"\
            "packsswb  8+"#off"(%2), %%mm1      \n\t"\
            "packsswb 24+"#off"(%2), %%mm2      \n\t"\
            "packsswb 40+"#off"(%2), %%mm3      \n\t"\
            "packsswb 56+"#off"(%2), %%mm4      \n\t"\
            "paddb %%mm0, %%mm1                 \n\t"\
            "paddb %%mm0, %%mm2                 \n\t"\
            "paddb %%mm0, %%mm3                 \n\t"\
            "paddb %%mm0, %%mm4                 \n\t"\
            "movq %%mm1, (%0)                   \n\t"\
            "movq %%mm2, (%0, %3)               \n\t"\
            "movq %%mm3, (%0, %3, 2)            \n\t"\
            "movq %%mm4, (%0, %1)               \n\t"

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
            "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
            "lea (%3, %3, 2), %1                \n\t"
            put_signed_pixels_clamped_mmx_half(0)
            "lea (%0, %3, 4), %0                \n\t"
            put_signed_pixels_clamped_mmx_half(64)
            :"+&r" (pixels), "=&r" (line_skip3)
            :"r" (block), "r"(line_skip)
            :"memory");
}
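
/* A note (not in the original file) on the bias trick above: packsswb clamps
 * each coefficient to signed [-128,127], and paddb with ff_vector128 (0x80 in
 * every byte) then maps that range, with byte wraparound, onto unsigned
 * [0,255].  Per pixel this is
 *
 *     pixels[x] = (uint8_t)(av_clip(block[x], -128, 127) + 128);
 */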

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
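
/* Per pixel, the function above computes (a sketch, not in the original
 * file):
 *
 *     int v = pixels[x] + block[x];               // paddsw
 *     pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;  // packuswb clamp
 */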

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
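
/* Notes (not in the original file): pavgb computes a rounded per-byte
 * average, so avg_pixels16_sse2 above is, per pixel,
 *
 *     block[x] = (block[x] + pixels[x] + 1) >> 1;
 *
 * Both SSE2 versions load the source with movdqu (unaligned) but store
 * through movdqa, so 'block' must be 16-byte aligned. */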

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7              \n\t"\
                "mov     %1, %%"REG_a"          \n\t"\
                "1:                             \n\t"\
                "movq %%mm7, (%0, %%"REG_a")    \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
                "add $32, %%"REG_a"             \n\t"\
                " js 1b                         \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov    %2, %k3 \n"
        "sub   %b1, %b3 \n"
        "add   %b0, %b3 \n"
        "mov    %2, %1 \n"
        "cmp    %0, %2 \n"
        "cmovg  %0, %2 \n"
        "cmovg  %1, %0 \n"
        "cmp   %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov    %7, %3 \n"
        "cmp    %2, %0 \n"
        "cmovl  %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
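
/* Sketch (not part of the original file) of the median prediction the cmov
 * version above implements; it should match the plain C version
 * (add_hfyu_median_prediction_c in dsputil.c, using mid_pred() from
 * libavutil):
 *
 *     int i, l = *left, tl = *left_top;
 *     for (i = 0; i < w; i++) {
 *         int t = top[i];
 *         l  = dst[i] = (uint8_t)(diff[i] + mid_pred(l, t, (l + t - tl) & 0xFF));
 *         tl = t;
 *     }
 *     *left = l;
 *     *left_top = tl;
 */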

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
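
/* What transpose4x4 above computes, as a scalar sketch (not in the original
 * file) -- a plain 4x4 byte transpose:
 *
 *     for (i = 0; i < 4; i++)
 *         for (j = 0; j < 4; j++)
 *             dst[i*dst_stride + j] = src[j*src_stride + i];
 */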

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 or w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif
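
/* Reference sketch (not part of the original file) of the Paeth predictor
 * from the PNG specification that the macro above vectorizes.  For each byte,
 * with a = left (dst[i-bpp]), b = above (top[i]) and c = upper-left
 * (top[i-bpp]):
 *
 *     int pa = abs(b - c);              // |p - a| with p = a + b - c
 *     int pb = abs(a - c);              // |p - b|
 *     int pc = abs((b - c) + (a - c));  // |p - c|
 *     int pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 *     dst[i] = src[i] + pred;           // modulo 256 (the ff_pw_255 mask)
 */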

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* 20x1 - x4 + rnd */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)
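
/* A note (not in the original file): each QPEL_V_LOW expansion evaluates one
 * output row of the MPEG-4 quarter-pel lowpass filter,
 *
 *     out = clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)
 *
 * where x1..x4 are the symmetric tap sums named in the comments above; the
 * scalar 3DNow fallbacks below apply the same 20/-6/3/-1 kernel
 * horizontally. */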
968

    
969
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
970
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
971
    uint64_t temp;\
972
\
973
    __asm__ volatile(\
974
        "pxor %%mm7, %%mm7                \n\t"\
975
        "1:                               \n\t"\
976
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
977
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
978
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
979
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
980
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
981
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
982
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
983
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
984
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
985
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
986
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
987
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
988
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
989
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
990
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
991
        "paddw %%mm3, %%mm5               \n\t" /* b */\
992
        "paddw %%mm2, %%mm6               \n\t" /* c */\
993
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
994
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
995
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
996
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
997
        "paddw %%mm4, %%mm0               \n\t" /* a */\
998
        "paddw %%mm1, %%mm5               \n\t" /* d */\
999
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1000
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
1001
        "paddw %6, %%mm6                  \n\t"\
1002
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1003
        "psraw $5, %%mm0                  \n\t"\
1004
        "movq %%mm0, %5                   \n\t"\
1005
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1006
        \
1007
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
1008
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
1009
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
1010
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
1011
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
1012
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
1013
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
1014
        "paddw %%mm0, %%mm2               \n\t" /* b */\
1015
        "paddw %%mm5, %%mm3               \n\t" /* c */\
1016
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
1017
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
1018
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
1019
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
1020
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
1021
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
1022
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
1023
        "paddw %%mm2, %%mm1               \n\t" /* a */\
1024
        "paddw %%mm6, %%mm4               \n\t" /* d */\
1025
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1026
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
1027
        "paddw %6, %%mm1                  \n\t"\
1028
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
1029
        "psraw $5, %%mm3                  \n\t"\
1030
        "movq %5, %%mm1                   \n\t"\
1031
        "packuswb %%mm3, %%mm1            \n\t"\
1032
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
1033
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
1034
        \
1035
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
1036
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
1037
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
1038
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
1039
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
1040
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
1041
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
1042
        "paddw %%mm1, %%mm5               \n\t" /* b */\
1043
        "paddw %%mm4, %%mm0               \n\t" /* c */\
1044
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1045
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
1046
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
1047
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
1048
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
1049
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
1050
        "paddw %%mm3, %%mm2               \n\t" /* d */\
1051
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
1052
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
1053
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
1054
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
1055
        "paddw %%mm2, %%mm6               \n\t" /* a */\
1056
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
1057
        "paddw %6, %%mm0                  \n\t"\
1058
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1059
        "psraw $5, %%mm0                  \n\t"\
1060
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
1061
        \
1062
        "paddw %%mm5, %%mm3               \n\t" /* a */\
1063
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
1064
        "paddw %%mm4, %%mm6               \n\t" /* b */\
1065
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
1066
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
1067
        "paddw %%mm1, %%mm4               \n\t" /* c */\
1068
        "paddw %%mm2, %%mm5               \n\t" /* d */\
1069
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
1070
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
1071
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
1072
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
1073
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
1074
        "paddw %6, %%mm4                  \n\t"\
1075
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
1076
        "psraw $5, %%mm4                  \n\t"\
1077
        "packuswb %%mm4, %%mm0            \n\t"\
1078
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
1079
        \
1080
        "add %3, %0                       \n\t"\
1081
        "add %4, %1                       \n\t"\
1082
        "decl %2                          \n\t"\
1083
        " jnz 1b                          \n\t"\
1084
        : "+a"(src), "+c"(dst), "+D"(h)\
1085
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1086
        : "memory"\
1087
    );\
1088
}\
1089
\
1090
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1091
    int i;\
1092
    int16_t temp[16];\
1093
    /* quick HACK, XXX FIXME MUST be optimized */\
1094
    for(i=0; i<h; i++)\
1095
    {\
1096
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1097
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1098
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1099
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1100
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1101
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
1102
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
1103
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
1104
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
1105
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
1106
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
1107
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
1108
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
1109
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
1110
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
1111
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
1112
        __asm__ volatile(\
1113
            "movq (%0), %%mm0               \n\t"\
1114
            "movq 8(%0), %%mm1              \n\t"\
1115
            "paddw %2, %%mm0                \n\t"\
1116
            "paddw %2, %%mm1                \n\t"\
1117
            "psraw $5, %%mm0                \n\t"\
1118
            "psraw $5, %%mm1                \n\t"\
1119
            "packuswb %%mm1, %%mm0          \n\t"\
1120
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1121
            "movq 16(%0), %%mm0             \n\t"\
1122
            "movq 24(%0), %%mm1             \n\t"\
1123
            "paddw %2, %%mm0                \n\t"\
1124
            "paddw %2, %%mm1                \n\t"\
1125
            "psraw $5, %%mm0                \n\t"\
1126
            "psraw $5, %%mm1                \n\t"\
1127
            "packuswb %%mm1, %%mm0          \n\t"\
1128
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
1129
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1130
            : "memory"\
1131
        );\
1132
        dst+=dstStride;\
1133
        src+=srcStride;\
1134
    }\
1135
}\
1136
\
1137
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1138
    __asm__ volatile(\
1139
        "pxor %%mm7, %%mm7                \n\t"\
1140
        "1:                               \n\t"\
1141
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1142
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1143
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1144
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
1145
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
1146
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
1147
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
1148
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1149
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1150
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
1151
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
1152
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
1153
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
1154
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
1155
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
1156
        "paddw %%mm3, %%mm5               \n\t" /* b */\
1157
        "paddw %%mm2, %%mm6               \n\t" /* c */\
1158
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1159
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
1160
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
1161
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
1162
        "paddw %%mm4, %%mm0               \n\t" /* a */\
1163
        "paddw %%mm1, %%mm5               \n\t" /* d */\
1164
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1165
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
1166
        "paddw %5, %%mm6                  \n\t"\
1167
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1168
        "psraw $5, %%mm0                  \n\t"\
1169
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1170
        \
1171
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
1172
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
1173
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
1174
        "paddw %%mm5, %%mm1               \n\t" /* a */\
1175
        "paddw %%mm6, %%mm2               \n\t" /* b */\
1176
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
1177
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
1178
        "paddw %%mm6, %%mm3               \n\t" /* c */\
1179
        "paddw %%mm5, %%mm4               \n\t" /* d */\
1180
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
1181
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
1182
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1183
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
1184
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
1185
        "paddw %5, %%mm1                  \n\t"\
1186
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
1187
        "psraw $5, %%mm3                  \n\t"\
1188
        "packuswb %%mm3, %%mm0            \n\t"\
1189
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
1190
        \
1191
        "add %3, %0                       \n\t"\
1192
        "add %4, %1                       \n\t"\
1193
        "decl %2                          \n\t"\
1194
        " jnz 1b                          \n\t"\
1195
        : "+a"(src), "+c"(dst), "+d"(h)\
1196
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
1197
        : "memory"\
1198
    );\
1199
}\
1200
\
1201
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1202
    int i;\
1203
    int16_t temp[8];\
1204
    /* quick HACK, XXX FIXME MUST be optimized */\
1205
    for(i=0; i<h; i++)\
1206
    {\
1207
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1208
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1209
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1210
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1211
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1212
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
1213
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
1214
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
1215
        __asm__ volatile(\
1216
            "movq (%0), %%mm0           \n\t"\
1217
            "movq 8(%0), %%mm1          \n\t"\
1218
            "paddw %2, %%mm0            \n\t"\
1219
            "paddw %2, %%mm1            \n\t"\
1220
            "psraw $5, %%mm0            \n\t"\
1221
            "psraw $5, %%mm1            \n\t"\
1222
            "packuswb %%mm1, %%mm0      \n\t"\
1223
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1224
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1225
            :"memory"\
1226
        );\
1227
        dst+=dstStride;\
1228
        src+=srcStride;\
1229
    }\
1230
}
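
/* Both the MMX2 path and the scalar 3DNow! fallback above evaluate the
 * MPEG-4 8-tap quarter-pel filter (-1, 3, -6, 20, 20, -6, 3, -1)/32,
 * folded into pairwise sums.  A rough sketch of one output sample
 * (the "s"/"sum" names are illustrative, not part of this file):
 *
 *     sum = 20*(s[x] + s[x+1]) - 6*(s[x-1] + s[x+2])
 *         +  3*(s[x-2] + s[x+3]) -   (s[x-3] + s[x+4]);
 *     dst = saturate_to_u8((sum + ROUNDER) >> 5);   // packuswb clamps
 *
 * Out-of-range taps are mirrored at the block edge, which is why the
 * scalar loop reuses src[0] and src[8] near the borders. */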
1231

1232
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
1233
\
1234
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1235
    uint64_t temp[17*4];\
1236
    uint64_t *temp_ptr= temp;\
1237
    int count= 17;\
1238
\
1239
    /*FIXME unroll */\
1240
    __asm__ volatile(\
1241
        "pxor %%mm7, %%mm7              \n\t"\
1242
        "1:                             \n\t"\
1243
        "movq (%0), %%mm0               \n\t"\
1244
        "movq (%0), %%mm1               \n\t"\
1245
        "movq 8(%0), %%mm2              \n\t"\
1246
        "movq 8(%0), %%mm3              \n\t"\
1247
        "punpcklbw %%mm7, %%mm0         \n\t"\
1248
        "punpckhbw %%mm7, %%mm1         \n\t"\
1249
        "punpcklbw %%mm7, %%mm2         \n\t"\
1250
        "punpckhbw %%mm7, %%mm3         \n\t"\
1251
        "movq %%mm0, (%1)               \n\t"\
1252
        "movq %%mm1, 17*8(%1)           \n\t"\
1253
        "movq %%mm2, 2*17*8(%1)         \n\t"\
1254
        "movq %%mm3, 3*17*8(%1)         \n\t"\
1255
        "add $8, %1                     \n\t"\
1256
        "add %3, %0                     \n\t"\
1257
        "decl %2                        \n\t"\
1258
        " jnz 1b                        \n\t"\
1259
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
1260
        : "r" ((x86_reg)srcStride)\
1261
        : "memory"\
1262
    );\
1263
    \
1264
    temp_ptr= temp;\
1265
    count=4;\
1266
    \
1267
/*FIXME reorder for speed */\
1268
    __asm__ volatile(\
1269
        /*"pxor %%mm7, %%mm7              \n\t"*/\
1270
        "1:                             \n\t"\
1271
        "movq (%0), %%mm0               \n\t"\
1272
        "movq 8(%0), %%mm1              \n\t"\
1273
        "movq 16(%0), %%mm2             \n\t"\
1274
        "movq 24(%0), %%mm3             \n\t"\
1275
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
1276
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
1277
        "add %4, %1                     \n\t"\
1278
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
1279
        \
1280
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
1281
        "add %4, %1                     \n\t"\
1282
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
1283
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
1284
        "add %4, %1                     \n\t"\
1285
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
1286
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
1287
        "add %4, %1                     \n\t"\
1288
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
1289
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
1290
        "add %4, %1                     \n\t"\
1291
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
1292
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
1293
        "add %4, %1                     \n\t"\
1294
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
1295
        \
1296
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
1297
        "add %4, %1                     \n\t"  \
1298
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
1299
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
1300
        \
1301
        "add $136, %0                   \n\t"\
1302
        "add %6, %1                     \n\t"\
1303
        "decl %2                        \n\t"\
1304
        " jnz 1b                        \n\t"\
1305
        \
1306
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
1307
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
1308
        :"memory"\
1309
    );\
1310
}\
1311
\
1312
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
1313
    uint64_t temp[9*2];\
1314
    uint64_t *temp_ptr= temp;\
1315
    int count= 9;\
1316
\
1317
    /*FIXME unroll */\
1318
    __asm__ volatile(\
1319
        "pxor %%mm7, %%mm7              \n\t"\
1320
        "1:                             \n\t"\
1321
        "movq (%0), %%mm0               \n\t"\
1322
        "movq (%0), %%mm1               \n\t"\
1323
        "punpcklbw %%mm7, %%mm0         \n\t"\
1324
        "punpckhbw %%mm7, %%mm1         \n\t"\
1325
        "movq %%mm0, (%1)               \n\t"\
1326
        "movq %%mm1, 9*8(%1)            \n\t"\
1327
        "add $8, %1                     \n\t"\
1328
        "add %3, %0                     \n\t"\
1329
        "decl %2                        \n\t"\
1330
        " jnz 1b                        \n\t"\
1331
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
1332
        : "r" ((x86_reg)srcStride)\
1333
        : "memory"\
1334
    );\
1335
    \
1336
    temp_ptr= temp;\
1337
    count=2;\
1338
    \
1339
/*FIXME reorder for speed */\
1340
    __asm__ volatile(\
1341
        /*"pxor %%mm7, %%mm7              \n\t"*/\
1342
        "1:                             \n\t"\
1343
        "movq (%0), %%mm0               \n\t"\
1344
        "movq 8(%0), %%mm1              \n\t"\
1345
        "movq 16(%0), %%mm2             \n\t"\
1346
        "movq 24(%0), %%mm3             \n\t"\
1347
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
1348
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
1349
        "add %4, %1                     \n\t"\
1350
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
1351
        \
1352
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
1353
        "add %4, %1                     \n\t"\
1354
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
1355
        \
1356
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
1357
        "add %4, %1                     \n\t"\
1358
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
1359
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
1360
                \
1361
        "add $72, %0                    \n\t"\
1362
        "add %6, %1                     \n\t"\
1363
        "decl %2                        \n\t"\
1364
        " jnz 1b                        \n\t"\
1365
         \
1366
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
1367
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
1368
        : "memory"\
1369
   );\
1370
}\
1371
\
1372
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1373
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
1374
}\
1375
\
1376
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1377
    uint64_t temp[8];\
1378
    uint8_t * const half= (uint8_t*)temp;\
1379
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1380
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
1381
}\
1382
\
1383
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1384
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
1385
}\
1386
\
1387
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1388
    uint64_t temp[8];\
1389
    uint8_t * const half= (uint8_t*)temp;\
1390
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
1391
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
1392
}\
1393
\
1394
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1395
    uint64_t temp[8];\
1396
    uint8_t * const half= (uint8_t*)temp;\
1397
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1398
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
1399
}\
1400
\
1401
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1402
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
1403
}\
1404
\
1405
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1406
    uint64_t temp[8];\
1407
    uint8_t * const half= (uint8_t*)temp;\
1408
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
1409
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
1410
}\
1411
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1412
    uint64_t half[8 + 9];\
1413
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1414
    uint8_t * const halfHV= ((uint8_t*)half);\
1415
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1416
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1417
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1418
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1419
}\
1420
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1421
    uint64_t half[8 + 9];\
1422
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1423
    uint8_t * const halfHV= ((uint8_t*)half);\
1424
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1425
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1426
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1427
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1428
}\
1429
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1430
    uint64_t half[8 + 9];\
1431
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1432
    uint8_t * const halfHV= ((uint8_t*)half);\
1433
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1434
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1435
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1436
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1437
}\
1438
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1439
    uint64_t half[8 + 9];\
1440
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1441
    uint8_t * const halfHV= ((uint8_t*)half);\
1442
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1443
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1444
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1445
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1446
}\
1447
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1448
    uint64_t half[8 + 9];\
1449
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1450
    uint8_t * const halfHV= ((uint8_t*)half);\
1451
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1452
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1453
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
1454
}\
1455
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1456
    uint64_t half[8 + 9];\
1457
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
1458
    uint8_t * const halfHV= ((uint8_t*)half);\
1459
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1460
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
1461
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
1462
}\
1463
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1464
    uint64_t half[8 + 9];\
1465
    uint8_t * const halfH= ((uint8_t*)half);\
1466
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1467
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
1468
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1469
}\
1470
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1471
    uint64_t half[8 + 9];\
1472
    uint8_t * const halfH= ((uint8_t*)half);\
1473
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1474
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
1475
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1476
}\
1477
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1478
    uint64_t half[9];\
1479
    uint8_t * const halfH= ((uint8_t*)half);\
1480
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
1481
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
1482
}\
1483
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
1484
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
1485
}\
1486
\
1487
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1488
    uint64_t temp[32];\
1489
    uint8_t * const half= (uint8_t*)temp;\
1490
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
1491
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
1492
}\
1493
\
1494
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1495
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
1496
}\
1497
\
1498
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1499
    uint64_t temp[32];\
1500
    uint8_t * const half= (uint8_t*)temp;\
1501
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
1502
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
1503
}\
1504
\
1505
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1506
    uint64_t temp[32];\
1507
    uint8_t * const half= (uint8_t*)temp;\
1508
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
1509
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
1510
}\
1511
\
1512
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1513
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
1514
}\
1515
\
1516
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1517
    uint64_t temp[32];\
1518
    uint8_t * const half= (uint8_t*)temp;\
1519
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
1520
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
1521
}\
1522
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1523
    uint64_t half[16*2 + 17*2];\
1524
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1525
    uint8_t * const halfHV= ((uint8_t*)half);\
1526
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1527
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1528
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1529
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1530
}\
1531
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1532
    uint64_t half[16*2 + 17*2];\
1533
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1534
    uint8_t * const halfHV= ((uint8_t*)half);\
1535
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1536
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1537
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1538
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1539
}\
1540
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1541
    uint64_t half[16*2 + 17*2];\
1542
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1543
    uint8_t * const halfHV= ((uint8_t*)half);\
1544
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1545
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1546
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1547
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1548
}\
1549
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1550
    uint64_t half[16*2 + 17*2];\
1551
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1552
    uint8_t * const halfHV= ((uint8_t*)half);\
1553
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1554
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1555
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1556
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1557
}\
1558
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1559
    uint64_t half[16*2 + 17*2];\
1560
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1561
    uint8_t * const halfHV= ((uint8_t*)half);\
1562
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1563
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1564
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
1565
}\
1566
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1567
    uint64_t half[16*2 + 17*2];\
1568
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
1569
    uint8_t * const halfHV= ((uint8_t*)half);\
1570
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1571
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
1572
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
1573
}\
1574
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1575
    uint64_t half[17*2];\
1576
    uint8_t * const halfH= ((uint8_t*)half);\
1577
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1578
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
1579
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1580
}\
1581
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1582
    uint64_t half[17*2];\
1583
    uint8_t * const halfH= ((uint8_t*)half);\
1584
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1585
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
1586
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1587
}\
1588
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1589
    uint64_t half[17*2];\
1590
    uint8_t * const halfH= ((uint8_t*)half);\
1591
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
1592
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
1593
}
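
/* QPEL_OP expands to the complete mc<x><y> function grid for one
 * OP/rounding mode: <x>,<y> are the quarter-pel phases (0..3) of the
 * motion vector.  mc<x>0 and mc0<y> run a single horizontal resp.
 * vertical lowpass; the odd phases are then approximated by averaging
 * the filtered block with the source (offset by one pixel or row for
 * phase 3), and the mixed cases chain the horizontal pass into the
 * vertical one through a temporary buffer. */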
1594

1595
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
1596
#define AVG_3DNOW_OP(a,b,temp, size) \
1597
"mov" #size " " #b ", " #temp "   \n\t"\
1598
"pavgusb " #temp ", " #a "        \n\t"\
1599
"mov" #size " " #a ", " #b "      \n\t"
1600
#define AVG_MMX2_OP(a,b,temp, size) \
1601
"mov" #size " " #b ", " #temp "   \n\t"\
1602
"pavgb " #temp ", " #a "          \n\t"\
1603
"mov" #size " " #a ", " #b "      \n\t"
1604

1605
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
1606
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
1607
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
1608
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
1609
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
1610
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
1611
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
1612
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
1613
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
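
/* The ROUNDER constant selects the rounding of the final ">> 5":
 * ff_pw_16 gives (sum + 16) >> 5, so ties round up, while the _no_rnd_
 * variants use ff_pw_15 so ties round down, matching the no-rounding
 * prediction mode. */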
1614

1615
/***********************************/
1616
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */
1617

1618
#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
1619
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1620
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
1621
}
1622
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
1623
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1624
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
1625
}
1626

1627
#define QPEL_2TAP(OPNAME, SIZE, MMX)\
1628
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
1629
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
1630
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
1631
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
1632
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
1633
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
1634
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
1635
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
1636
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
1637
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1638
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
1639
}\
1640
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
1641
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
1642
}\
1643
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
1644
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
1645
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
1646
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
1647
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
1648
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
1649
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
1650
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
1651

1652
QPEL_2TAP(put_, 16, mmx2)
1653
QPEL_2TAP(avg_, 16, mmx2)
1654
QPEL_2TAP(put_,  8, mmx2)
1655
QPEL_2TAP(avg_,  8, mmx2)
1656
QPEL_2TAP(put_, 16, 3dnow)
1657
QPEL_2TAP(avg_, 16, 3dnow)
1658
QPEL_2TAP(put_,  8, 3dnow)
1659
QPEL_2TAP(avg_,  8, 3dnow)
1660

1661

1662
#if 0
1663
static void just_return(void) { return; }
1664
#endif
1665

1666
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
1667
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
1668
    const int w = 8;
1669
    const int ix = ox>>(16+shift);
1670
    const int iy = oy>>(16+shift);
1671
    const int oxs = ox>>4;
1672
    const int oys = oy>>4;
1673
    const int dxxs = dxx>>4;
1674
    const int dxys = dxy>>4;
1675
    const int dyxs = dyx>>4;
1676
    const int dyys = dyy>>4;
1677
    const uint16_t r4[4] = {r,r,r,r};
1678
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
1679
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
1680
    const uint64_t shift2 = 2*shift;
1681
    uint8_t edge_buf[(h+1)*stride];
1682
    int x, y;
1683

1684
    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
1685
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
1686
    const int dxh = dxy*(h-1);
1687
    const int dyw = dyx*(w-1);
1688
    if( // non-constant fullpel offset (3% of blocks)
1689
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
1690
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
1691
        // uses more than 16 bits of subpel mv (only at huge resolution)
1692
        || (dxx|dxy|dyx|dyy)&15 )
1693
    {
1694
        //FIXME could still use mmx for some of the rows
1695
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
1696
        return;
1697
    }
1698

1699
    src += ix + iy*stride;
1700
    if( (unsigned)ix >= width-w ||
1701
        (unsigned)iy >= height-h )
1702
    {
1703
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
1704
        src = edge_buf;
1705
    }
1706

1707
    __asm__ volatile(
1708
        "movd         %0, %%mm6 \n\t"
1709
        "pxor      %%mm7, %%mm7 \n\t"
1710
        "punpcklwd %%mm6, %%mm6 \n\t"
1711
        "punpcklwd %%mm6, %%mm6 \n\t"
1712
        :: "r"(1<<shift)
1713
    );
1714

1715
    for(x=0; x<w; x+=4){
1716
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
1717
                            oxs - dxys + dxxs*(x+1),
1718
                            oxs - dxys + dxxs*(x+2),
1719
                            oxs - dxys + dxxs*(x+3) };
1720
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
1721
                            oys - dyys + dyxs*(x+1),
1722
                            oys - dyys + dyxs*(x+2),
1723
                            oys - dyys + dyxs*(x+3) };
1724

1725
        for(y=0; y<h; y++){
1726
            __asm__ volatile(
1727
                "movq   %0,  %%mm4 \n\t"
1728
                "movq   %1,  %%mm5 \n\t"
1729
                "paddw  %2,  %%mm4 \n\t"
1730
                "paddw  %3,  %%mm5 \n\t"
1731
                "movq   %%mm4, %0  \n\t"
1732
                "movq   %%mm5, %1  \n\t"
1733
                "psrlw  $12, %%mm4 \n\t"
1734
                "psrlw  $12, %%mm5 \n\t"
1735
                : "+m"(*dx4), "+m"(*dy4)
1736
                : "m"(*dxy4), "m"(*dyy4)
1737
            );
1738

1739
            __asm__ volatile(
1740
                "movq   %%mm6, %%mm2 \n\t"
1741
                "movq   %%mm6, %%mm1 \n\t"
1742
                "psubw  %%mm4, %%mm2 \n\t"
1743
                "psubw  %%mm5, %%mm1 \n\t"
1744
                "movq   %%mm2, %%mm0 \n\t"
1745
                "movq   %%mm4, %%mm3 \n\t"
1746
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
1747
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
1748
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
1749
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
1750

1751
                "movd   %4,    %%mm5 \n\t"
1752
                "movd   %3,    %%mm4 \n\t"
1753
                "punpcklbw %%mm7, %%mm5 \n\t"
1754
                "punpcklbw %%mm7, %%mm4 \n\t"
1755
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
1756
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
1757

1758
                "movd   %2,    %%mm5 \n\t"
1759
                "movd   %1,    %%mm4 \n\t"
1760
                "punpcklbw %%mm7, %%mm5 \n\t"
1761
                "punpcklbw %%mm7, %%mm4 \n\t"
1762
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
1763
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
1764
                "paddw  %5,    %%mm1 \n\t"
1765
                "paddw  %%mm3, %%mm2 \n\t"
1766
                "paddw  %%mm1, %%mm0 \n\t"
1767
                "paddw  %%mm2, %%mm0 \n\t"
1768

1769
                "psrlw    %6,    %%mm0 \n\t"
1770
                "packuswb %%mm0, %%mm0 \n\t"
1771
                "movd     %%mm0, %0    \n\t"
1772

1773
                : "=m"(dst[x+y*stride])
1774
                : "m"(src[0]), "m"(src[1]),
1775
                  "m"(src[stride]), "m"(src[stride+1]),
1776
                  "m"(*r4), "m"(shift2)
1777
            );
1778
            src += stride;
1779
        }
1780
        src += 4-h*stride;
1781
    }
1782
}
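
/* For reference, a scalar sketch of the bilinear blend the MMX loop
 * above performs per pixel (s = 1<<shift, dx/dy the fractional offsets
 * kept in mm4/mm5; the function and variable names are illustrative
 * only, mirroring the inline comments in the asm): */
#if 0
static uint8_t gmc_blend_pixel(const uint8_t *src, int stride,
                               int dx, int dy, int s, int r, int shift)
{
    unsigned sum = src[0]        * (s - dx) * (s - dy)
                 + src[1]        *      dx  * (s - dy)
                 + src[stride]   * (s - dx) *      dy
                 + src[stride+1] *      dx  *      dy
                 + r;
    return av_clip_uint8(sum >> (2 * shift));  /* packuswb saturates */
}
#endif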
1783

1784
#define PREFETCH(name, op) \
1785
static void name(void *mem, int stride, int h){\
1786
    const uint8_t *p= mem;\
1787
    do{\
1788
        __asm__ volatile(#op" %0" :: "m"(*p));\
1789
        p+= stride;\
1790
    }while(--h);\
1791
}
1792
PREFETCH(prefetch_mmx2,  prefetcht0)
1793
PREFETCH(prefetch_3dnow, prefetch)
1794
#undef PREFETCH
1795

1796
#include "h264dsp_mmx.c"
1797
#include "rv40dsp_mmx.c"
1798

1799
/* CAVS specific */
1800
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
1801
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
1802

1803
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1804
    put_pixels8_mmx(dst, src, stride, 8);
1805
}
1806
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1807
    avg_pixels8_mmx(dst, src, stride, 8);
1808
}
1809
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1810
    put_pixels16_mmx(dst, src, stride, 16);
1811
}
1812
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
1813
    avg_pixels16_mmx(dst, src, stride, 16);
1814
}
1815

1816
/* VC1 specific */
1817
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
1818

1819
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1820
    put_pixels8_mmx(dst, src, stride, 8);
1821
}
1822
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
1823
    avg_pixels8_mmx2(dst, src, stride, 8);
1824
}
1825

1826
/* external functions, from idct_mmx.c */
1827
void ff_mmx_idct(DCTELEM *block);
1828
void ff_mmxext_idct(DCTELEM *block);
1829

1830
/* XXX: those functions should be suppressed ASAP when all IDCTs are
1831
   converted */
1832
#if CONFIG_GPL
1833
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1834
{
1835
    ff_mmx_idct (block);
1836
    put_pixels_clamped_mmx(block, dest, line_size);
1837
}
1838
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1839
{
1840
    ff_mmx_idct (block);
1841
    add_pixels_clamped_mmx(block, dest, line_size);
1842
}
1843
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
1844
{
1845
    ff_mmxext_idct (block);
1846
    put_pixels_clamped_mmx(block, dest, line_size);
1847
}
1848
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
1849
{
1850
    ff_mmxext_idct (block);
1851
    add_pixels_clamped_mmx(block, dest, line_size);
1852
}
1853
#endif
1854
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
1855
{
1856
    ff_idct_xvid_mmx (block);
1857
    put_pixels_clamped_mmx(block, dest, line_size);
1858
}
1859
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
1860
{
1861
    ff_idct_xvid_mmx (block);
1862
    add_pixels_clamped_mmx(block, dest, line_size);
1863
}
1864
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
1865
{
1866
    ff_idct_xvid_mmx2 (block);
1867
    put_pixels_clamped_mmx(block, dest, line_size);
1868
}
1869
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
1870
{
1871
    ff_idct_xvid_mmx2 (block);
1872
    add_pixels_clamped_mmx(block, dest, line_size);
1873
}
1874

1875
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
1876
{
1877
    int i;
1878
    __asm__ volatile("pxor %%mm7, %%mm7":);
1879
    for(i=0; i<blocksize; i+=2) {
1880
        __asm__ volatile(
1881
            "movq    %0,    %%mm0 \n\t"
1882
            "movq    %1,    %%mm1 \n\t"
1883
            "movq    %%mm0, %%mm2 \n\t"
1884
            "movq    %%mm1, %%mm3 \n\t"
1885
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
1886
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
1887
            "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
1888
            "pxor    %%mm2, %%mm1 \n\t"
1889
            "movq    %%mm3, %%mm4 \n\t"
1890
            "pand    %%mm1, %%mm3 \n\t"
1891
            "pandn   %%mm1, %%mm4 \n\t"
1892
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1893
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1894
            "movq    %%mm3, %1    \n\t"
1895
            "movq    %%mm0, %0    \n\t"
1896
            :"+m"(mag[i]), "+m"(ang[i])
1897
            ::"memory"
1898
        );
1899
    }
1900
    __asm__ volatile("femms");
1901
}
1902
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
1903
{
1904
    int i;
1905

1906
    __asm__ volatile(
1907
            "movaps  %0,     %%xmm5 \n\t"
1908
        ::"m"(ff_pdw_80000000[0])
1909
    );
1910
    for(i=0; i<blocksize; i+=4) {
1911
        __asm__ volatile(
1912
            "movaps  %0,     %%xmm0 \n\t"
1913
            "movaps  %1,     %%xmm1 \n\t"
1914
            "xorps   %%xmm2, %%xmm2 \n\t"
1915
            "xorps   %%xmm3, %%xmm3 \n\t"
1916
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
1917
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
1918
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
1919
            "xorps   %%xmm2, %%xmm1 \n\t"
1920
            "movaps  %%xmm3, %%xmm4 \n\t"
1921
            "andps   %%xmm1, %%xmm3 \n\t"
1922
            "andnps  %%xmm1, %%xmm4 \n\t"
1923
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
1924
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
1925
            "movaps  %%xmm3, %1     \n\t"
1926
            "movaps  %%xmm0, %0     \n\t"
1927
            :"+m"(mag[i]), "+m"(ang[i])
1928
            ::"memory"
1929
        );
1930
    }
1931
}
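
/* Both coupling loops above are branchless rewrites of the scalar
 * magnitude/angle logic in the Vorbis decoder, roughly (sketch; they
 * may differ from the C version for m == 0.0 exactly): */
#if 0
for (i = 0; i < blocksize; i++) {
    float m = mag[i], a = ang[i];
    if (m > 0) {
        ang[i] = a > 0 ? m - a : m;
        mag[i] = a > 0 ? m     : m + a;
    } else {
        ang[i] = a > 0 ? m + a : m;
        mag[i] = a > 0 ? m     : m - a;
    }
}
#endif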
1932

1933
#define IF1(x) x
1934
#define IF0(x)
1935

1936
#define MIX5(mono,stereo)\
1937
    __asm__ volatile(\
1938
        "movss          0(%2), %%xmm5 \n"\
1939
        "movss          8(%2), %%xmm6 \n"\
1940
        "movss         24(%2), %%xmm7 \n"\
1941
        "shufps    $0, %%xmm5, %%xmm5 \n"\
1942
        "shufps    $0, %%xmm6, %%xmm6 \n"\
1943
        "shufps    $0, %%xmm7, %%xmm7 \n"\
1944
        "1: \n"\
1945
        "movaps       (%0,%1), %%xmm0 \n"\
1946
        "movaps  0x400(%0,%1), %%xmm1 \n"\
1947
        "movaps  0x800(%0,%1), %%xmm2 \n"\
1948
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
1949
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
1950
        "mulps         %%xmm5, %%xmm0 \n"\
1951
        "mulps         %%xmm6, %%xmm1 \n"\
1952
        "mulps         %%xmm5, %%xmm2 \n"\
1953
        "mulps         %%xmm7, %%xmm3 \n"\
1954
        "mulps         %%xmm7, %%xmm4 \n"\
1955
 stereo("addps         %%xmm1, %%xmm0 \n")\
1956
        "addps         %%xmm1, %%xmm2 \n"\
1957
        "addps         %%xmm3, %%xmm0 \n"\
1958
        "addps         %%xmm4, %%xmm2 \n"\
1959
   mono("addps         %%xmm2, %%xmm0 \n")\
1960
        "movaps  %%xmm0,      (%0,%1) \n"\
1961
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
1962
        "add $16, %0 \n"\
1963
        "jl 1b \n"\
1964
        :"+&r"(i)\
1965
        :"r"(samples[0]+len), "r"(matrix)\
1966
        :"memory"\
1967
    );
1968

1969
#define MIX_MISC(stereo)\
1970
    __asm__ volatile(\
1971
        "1: \n"\
1972
        "movaps  (%3,%0), %%xmm0 \n"\
1973
 stereo("movaps   %%xmm0, %%xmm1 \n")\
1974
        "mulps    %%xmm6, %%xmm0 \n"\
1975
 stereo("mulps    %%xmm7, %%xmm1 \n")\
1976
        "lea 1024(%3,%0), %1 \n"\
1977
        "mov %5, %2 \n"\
1978
        "2: \n"\
1979
        "movaps   (%1),   %%xmm2 \n"\
1980
 stereo("movaps   %%xmm2, %%xmm3 \n")\
1981
        "mulps   (%4,%2), %%xmm2 \n"\
1982
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
1983
        "addps    %%xmm2, %%xmm0 \n"\
1984
 stereo("addps    %%xmm3, %%xmm1 \n")\
1985
        "add $1024, %1 \n"\
1986
        "add $32, %2 \n"\
1987
        "jl 2b \n"\
1988
        "movaps   %%xmm0,     (%3,%0) \n"\
1989
 stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
1990
        "add $16, %0 \n"\
1991
        "jl 1b \n"\
1992
        :"+&r"(i), "=&r"(j), "=&r"(k)\
1993
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
1994
        :"memory"\
1995
    );
1996

1997
static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
1998
{
1999
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
2000
    intptr_t i,j,k;
2001

2002
    i = -len*sizeof(float);
2003
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
2004
        MIX5(IF0,IF1);
2005
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
2006
        MIX5(IF1,IF0);
2007
    } else {
2008
        DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
2009
        j = 2*in_ch*sizeof(float);
2010
        __asm__ volatile(
2011
            "1: \n"
2012
            "sub $8, %0 \n"
2013
            "movss     (%2,%0), %%xmm6 \n"
2014
            "movss    4(%2,%0), %%xmm7 \n"
2015
            "shufps $0, %%xmm6, %%xmm6 \n"
2016
            "shufps $0, %%xmm7, %%xmm7 \n"
2017
            "movaps %%xmm6,   (%1,%0,4) \n"
2018
            "movaps %%xmm7, 16(%1,%0,4) \n"
2019
            "jg 1b \n"
2020
            :"+&r"(j)
2021
            :"r"(matrix_simd), "r"(matrix)
2022
            :"memory"
2023
        );
2024
        if(out_ch == 2) {
2025
            MIX_MISC(IF1);
2026
        } else {
2027
            MIX_MISC(IF0);
2028
        }
2029
    }
2030
}
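
/* MIX5 covers the common 5.0 layouts whose matrix symmetry is checked
 * above; with channel order L, C, R, Ls, Rs it reduces per sample to
 * roughly
 *     left  = a*L + c*C + s*Ls;
 *     right = a*R + c*C + s*Rs;
 * where a = matrix[0][0], c = matrix[1][0], s = matrix[3][0] (names
 * illustrative); the mono variant folds the two sums into one output.
 * MIX_MISC is the generic path: it walks the full in_ch x out_ch
 * matrix, pre-broadcast into matrix_simd by the loop above it. */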
2031

2032
static void vector_fmul_3dnow(float *dst, const float *src, int len){
2033
    x86_reg i = (len-4)*4;
2034
    __asm__ volatile(
2035
        "1: \n\t"
2036
        "movq    (%1,%0), %%mm0 \n\t"
2037
        "movq   8(%1,%0), %%mm1 \n\t"
2038
        "pfmul   (%2,%0), %%mm0 \n\t"
2039
        "pfmul  8(%2,%0), %%mm1 \n\t"
2040
        "movq   %%mm0,  (%1,%0) \n\t"
2041
        "movq   %%mm1, 8(%1,%0) \n\t"
2042
        "sub  $16, %0 \n\t"
2043
        "jge 1b \n\t"
2044
        "femms  \n\t"
2045
        :"+r"(i)
2046
        :"r"(dst), "r"(src)
2047
        :"memory"
2048
    );
2049
}
2050
static void vector_fmul_sse(float *dst, const float *src, int len){
2051
    x86_reg i = (len-8)*4;
2052
    __asm__ volatile(
2053
        "1: \n\t"
2054
        "movaps    (%1,%0), %%xmm0 \n\t"
2055
        "movaps  16(%1,%0), %%xmm1 \n\t"
2056
        "mulps     (%2,%0), %%xmm0 \n\t"
2057
        "mulps   16(%2,%0), %%xmm1 \n\t"
2058
        "movaps  %%xmm0,   (%1,%0) \n\t"
2059
        "movaps  %%xmm1, 16(%1,%0) \n\t"
2060
        "sub  $32, %0 \n\t"
2061
        "jge 1b \n\t"
2062
        :"+r"(i)
2063
        :"r"(dst), "r"(src)
2064
        :"memory"
2065
    );
2066
}
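
/* Scalar equivalent of both versions: dst[i] *= src[i] for i in
 * [0,len).  len is assumed to be a multiple of the unroll width
 * (4 floats for 3DNow!, 8 for SSE) and the buffers 16-byte aligned
 * in the SSE case (movaps). */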
2067

2068
static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
2069
    x86_reg i = len*4-16;
2070
    __asm__ volatile(
2071
        "1: \n\t"
2072
        "pswapd   8(%1), %%mm0 \n\t"
2073
        "pswapd    (%1), %%mm1 \n\t"
2074
        "pfmul  (%3,%0), %%mm0 \n\t"
2075
        "pfmul 8(%3,%0), %%mm1 \n\t"
2076
        "movq  %%mm0,  (%2,%0) \n\t"
2077
        "movq  %%mm1, 8(%2,%0) \n\t"
2078
        "add   $16, %1 \n\t"
2079
        "sub   $16, %0 \n\t"
2080
        "jge   1b \n\t"
2081
        :"+r"(i), "+r"(src1)
2082
        :"r"(dst), "r"(src0)
2083
    );
2084
    __asm__ volatile("femms");
2085
}
2086
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
2087
    x86_reg i = len*4-32;
2088
    __asm__ volatile(
2089
        "1: \n\t"
2090
        "movaps        16(%1), %%xmm0 \n\t"
2091
        "movaps          (%1), %%xmm1 \n\t"
2092
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
2093
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
2094
        "mulps        (%3,%0), %%xmm0 \n\t"
2095
        "mulps      16(%3,%0), %%xmm1 \n\t"
2096
        "movaps     %%xmm0,   (%2,%0) \n\t"
2097
        "movaps     %%xmm1, 16(%2,%0) \n\t"
2098
        "add    $32, %1 \n\t"
2099
        "sub    $32, %0 \n\t"
2100
        "jge    1b \n\t"
2101
        :"+r"(i), "+r"(src1)
2102
        :"r"(dst), "r"(src0)
2103
    );
2104
}
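
/* Scalar equivalent: dst[i] = src0[i] * src1[len-1-i].  The reversal
 * of src1 is done in registers, via pswapd on 3DNow!2 and
 * shufps $0x1b (lane order 0,1,2,3 -> 3,2,1,0) on SSE. */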
2105

2106
static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
2107
                                      const float *src2, int src3, int len, int step){
2108
    x86_reg i = (len-4)*4;
2109
    if(step == 2 && src3 == 0){
2110
        dst += (len-4)*2;
2111
        __asm__ volatile(
2112
            "1: \n\t"
2113
            "movq   (%2,%0),  %%mm0 \n\t"
2114
            "movq  8(%2,%0),  %%mm1 \n\t"
2115
            "pfmul  (%3,%0),  %%mm0 \n\t"
2116
            "pfmul 8(%3,%0),  %%mm1 \n\t"
2117
            "pfadd  (%4,%0),  %%mm0 \n\t"
2118
            "pfadd 8(%4,%0),  %%mm1 \n\t"
2119
            "movd     %%mm0,   (%1) \n\t"
2120
            "movd     %%mm1, 16(%1) \n\t"
2121
            "psrlq      $32,  %%mm0 \n\t"
2122
            "psrlq      $32,  %%mm1 \n\t"
2123
            "movd     %%mm0,  8(%1) \n\t"
2124
            "movd     %%mm1, 24(%1) \n\t"
2125
            "sub  $32, %1 \n\t"
2126
            "sub  $16, %0 \n\t"
2127
            "jge  1b \n\t"
2128
            :"+r"(i), "+r"(dst)
2129
            :"r"(src0), "r"(src1), "r"(src2)
2130
            :"memory"
2131
        );
2132
    }
2133
    else if(step == 1 && src3 == 0){
2134
        __asm__ volatile(
2135
            "1: \n\t"
2136
            "movq    (%2,%0), %%mm0 \n\t"
2137
            "movq   8(%2,%0), %%mm1 \n\t"
2138
            "pfmul   (%3,%0), %%mm0 \n\t"
2139
            "pfmul  8(%3,%0), %%mm1 \n\t"
2140
            "pfadd   (%4,%0), %%mm0 \n\t"
2141
            "pfadd  8(%4,%0), %%mm1 \n\t"
2142
            "movq  %%mm0,   (%1,%0) \n\t"
2143
            "movq  %%mm1,  8(%1,%0) \n\t"
2144
            "sub  $16, %0 \n\t"
2145
            "jge  1b \n\t"
2146
            :"+r"(i)
2147
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2148
            :"memory"
2149
        );
2150
    }
2151
    else
2152
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2153
    __asm__ volatile("femms");
2154
}
2155
static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
2156
                                    const float *src2, int src3, int len, int step){
2157
    x86_reg i = (len-8)*4;
2158
    if(step == 2 && src3 == 0){
2159
        dst += (len-8)*2;
2160
        __asm__ volatile(
2161
            "1: \n\t"
2162
            "movaps   (%2,%0), %%xmm0 \n\t"
2163
            "movaps 16(%2,%0), %%xmm1 \n\t"
2164
            "mulps    (%3,%0), %%xmm0 \n\t"
2165
            "mulps  16(%3,%0), %%xmm1 \n\t"
2166
            "addps    (%4,%0), %%xmm0 \n\t"
2167
            "addps  16(%4,%0), %%xmm1 \n\t"
2168
            "movss     %%xmm0,   (%1) \n\t"
2169
            "movss     %%xmm1, 32(%1) \n\t"
2170
            "movhlps   %%xmm0, %%xmm2 \n\t"
2171
            "movhlps   %%xmm1, %%xmm3 \n\t"
2172
            "movss     %%xmm2, 16(%1) \n\t"
2173
            "movss     %%xmm3, 48(%1) \n\t"
2174
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
2175
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
2176
            "movss     %%xmm0,  8(%1) \n\t"
2177
            "movss     %%xmm1, 40(%1) \n\t"
2178
            "movhlps   %%xmm0, %%xmm2 \n\t"
2179
            "movhlps   %%xmm1, %%xmm3 \n\t"
2180
            "movss     %%xmm2, 24(%1) \n\t"
2181
            "movss     %%xmm3, 56(%1) \n\t"
2182
            "sub  $64, %1 \n\t"
2183
            "sub  $32, %0 \n\t"
2184
            "jge  1b \n\t"
2185
            :"+r"(i), "+r"(dst)
2186
            :"r"(src0), "r"(src1), "r"(src2)
2187
            :"memory"
2188
        );
2189
    }
2190
    else if(step == 1 && src3 == 0){
2191
        __asm__ volatile(
2192
            "1: \n\t"
2193
            "movaps   (%2,%0), %%xmm0 \n\t"
2194
            "movaps 16(%2,%0), %%xmm1 \n\t"
2195
            "mulps    (%3,%0), %%xmm0 \n\t"
2196
            "mulps  16(%3,%0), %%xmm1 \n\t"
2197
            "addps    (%4,%0), %%xmm0 \n\t"
2198
            "addps  16(%4,%0), %%xmm1 \n\t"
2199
            "movaps %%xmm0,   (%1,%0) \n\t"
2200
            "movaps %%xmm1, 16(%1,%0) \n\t"
2201
            "sub  $32, %0 \n\t"
2202
            "jge  1b \n\t"
2203
            :"+r"(i)
2204
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
2205
            :"memory"
2206
        );
2207
    }
2208
    else
2209
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
2210
}
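
/* C reference for the fast paths above:
 *     dst[i*step] = src0[i]*src1[i] + src2[i] + src3;
 * only src3 == 0 with step 1 (contiguous) or step 2 (interleaved
 * store) is handled in SIMD; everything else falls through to
 * ff_vector_fmul_add_add_c(). */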
2211

2212
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
2213
                                      const float *win, float add_bias, int len){
2214
#if HAVE_6REGS
2215
    if(add_bias == 0){
2216
        x86_reg i = -len*4;
2217
        x86_reg j = len*4-8;
2218
        __asm__ volatile(
2219
            "1: \n"
2220
            "pswapd  (%5,%1), %%mm1 \n"
2221
            "movq    (%5,%0), %%mm0 \n"
2222
            "pswapd  (%4,%1), %%mm5 \n"
2223
            "movq    (%3,%0), %%mm4 \n"
2224
            "movq      %%mm0, %%mm2 \n"
2225
            "movq      %%mm1, %%mm3 \n"
2226
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
2227
            "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
2228
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
2229
            "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
2230
            "pfadd     %%mm3, %%mm2 \n"
2231
            "pfsub     %%mm0, %%mm1 \n"
2232
            "pswapd    %%mm2, %%mm2 \n"
2233
            "movq      %%mm1, (%2,%0) \n"
2234
            "movq      %%mm2, (%2,%1) \n"
2235
            "sub $8, %1 \n"
2236
            "add $8, %0 \n"
2237
            "jl 1b \n"
2238
            "femms \n"
2239
            :"+r"(i), "+r"(j)
2240
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2241
        );
2242
    }else
2243
#endif
2244
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2245
}
2246

2247
static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
2248
                                   const float *win, float add_bias, int len){
2249
#if HAVE_6REGS
2250
    if(add_bias == 0){
2251
        x86_reg i = -len*4;
2252
        x86_reg j = len*4-16;
2253
        __asm__ volatile(
2254
            "1: \n"
2255
            "movaps       (%5,%1), %%xmm1 \n"
2256
            "movaps       (%5,%0), %%xmm0 \n"
2257
            "movaps       (%4,%1), %%xmm5 \n"
2258
            "movaps       (%3,%0), %%xmm4 \n"
2259
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
2260
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
2261
            "movaps        %%xmm0, %%xmm2 \n"
2262
            "movaps        %%xmm1, %%xmm3 \n"
2263
            "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
2264
            "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
2265
            "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
2266
            "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
2267
            "addps         %%xmm3, %%xmm2 \n"
2268
            "subps         %%xmm0, %%xmm1 \n"
2269
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
2270
            "movaps        %%xmm1, (%2,%0) \n"
2271
            "movaps        %%xmm2, (%2,%1) \n"
2272
            "sub $16, %1 \n"
2273
            "add $16, %0 \n"
2274
            "jl 1b \n"
2275
            :"+r"(i), "+r"(j)
2276
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
2277
        );
2278
    }else
2279
#endif
2280
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
2281
}
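
/* The zero-bias windowing both loops implement, in scalar form
 * (cf. the C fallback ff_vector_fmul_window_c): */
#if 0
for (i = -len, j = len - 1; i < 0; i++, j--) {
    float s0 = src0[len + i];
    float s1 = src1[j];
    float wi = win[len + i], wj = win[len + j];
    dst[len + i] = s0 * wj - s1 * wi;
    dst[len + j] = s0 * wi + s1 * wj;
}
#endif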
2282

2283
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
2284
{
2285
    x86_reg i = -4*len;
2286
    __asm__ volatile(
2287
        "movss  %3, %%xmm4 \n"
2288
        "shufps $0, %%xmm4, %%xmm4 \n"
2289
        "1: \n"
2290
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
2291
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
2292
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
2293
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
2294
        "movlhps  %%xmm1,    %%xmm0 \n"
2295
        "movlhps  %%xmm3,    %%xmm2 \n"
2296
        "mulps    %%xmm4,    %%xmm0 \n"
2297
        "mulps    %%xmm4,    %%xmm2 \n"
2298
        "movaps   %%xmm0,   (%1,%0) \n"
2299
        "movaps   %%xmm2, 16(%1,%0) \n"
2300
        "add $32, %0 \n"
2301
        "jl 1b \n"
2302
        :"+r"(i)
2303
        :"r"(dst+len), "r"(src+len), "m"(mul)
2304
    );
2305
}
2306

2307
static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
2308
{
2309
    x86_reg i = -4*len;
2310
    __asm__ volatile(
2311
        "movss  %3, %%xmm4 \n"
2312
        "shufps $0, %%xmm4, %%xmm4 \n"
2313
        "1: \n"
2314
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
2315
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
2316
        "mulps    %%xmm4,    %%xmm0 \n"
2317
        "mulps    %%xmm4,    %%xmm1 \n"
2318
        "movaps   %%xmm0,   (%1,%0) \n"
2319
        "movaps   %%xmm1, 16(%1,%0) \n"
2320
        "add $32, %0 \n"
2321
        "jl 1b \n"
2322
        :"+r"(i)
2323
        :"r"(dst+len), "r"(src+len), "m"(mul)
2324
    );
2325
}
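
/* Scalar equivalent of both versions: dst[i] = src[i] * mul.  SSE has
 * to convert through cvtpi2ps (two int32 at a time), while SSE2
 * converts a whole vector with cvtdq2ps; len is assumed to be a
 * multiple of 8 and the buffers 16-byte aligned. */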
2326

2327
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
2328
    x86_reg reglen = len;
2329
    // not bit-exact: pf2id uses different rounding than C and SSE
2330
    __asm__ volatile(
2331
        "add        %0          , %0        \n\t"
2332
        "lea         (%2,%0,2)  , %2        \n\t"
2333
        "add        %0          , %1        \n\t"
2334
        "neg        %0                      \n\t"
2335
        "1:                                 \n\t"
2336
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
2337
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
2338
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
2339
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
2340
        "packssdw   %%mm1       , %%mm0     \n\t"
2341
        "packssdw   %%mm3       , %%mm2     \n\t"
2342
        "movq       %%mm0       ,  (%1,%0)  \n\t"
2343
        "movq       %%mm2       , 8(%1,%0)  \n\t"
2344
        "add        $16         , %0        \n\t"
2345
        " js 1b                             \n\t"
2346
        "femms                              \n\t"
2347
        :"+r"(reglen), "+r"(dst), "+r"(src)
2348
    );
2349
}
2350
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
2351
    x86_reg reglen = len;
2352
    __asm__ volatile(
2353
        "add        %0          , %0        \n\t"
2354
        "lea         (%2,%0,2)  , %2        \n\t"
2355
        "add        %0          , %1        \n\t"
2356
        "neg        %0                      \n\t"
2357
        "1:                                 \n\t"
2358
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
2359
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
2360
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
2361
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
2362
        "packssdw   %%mm1       , %%mm0     \n\t"
2363
        "packssdw   %%mm3       , %%mm2     \n\t"
2364
        "movq       %%mm0       ,  (%1,%0)  \n\t"
2365
        "movq       %%mm2       , 8(%1,%0)  \n\t"
2366
        "add        $16         , %0        \n\t"
2367
        " js 1b                             \n\t"
2368
        "emms                               \n\t"
2369
        :"+r"(reglen), "+r"(dst), "+r"(src)
2370
    );
2371
}
2372

2373
static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2374
    x86_reg reglen = len;
2375
    __asm__ volatile(
2376
        "add        %0          , %0        \n\t"
2377
        "lea         (%2,%0,2)  , %2        \n\t"
2378
        "add        %0          , %1        \n\t"
2379
        "neg        %0                      \n\t"
2380
        "1:                                 \n\t"
2381
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
2382
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
2383
        "packssdw   %%xmm1      , %%xmm0    \n\t"
2384
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
2385
        "add        $16         , %0        \n\t"
2386
        " js 1b                             \n\t"
2387
        :"+r"(reglen), "+r"(dst), "+r"(src)
2388
    );
2389
}
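
/* All three variants compute, per sample, roughly
 *     dst[i] = av_clip_int16(lrintf(src[i]));
 * cvtps2pi/cvtps2dq round in the current (round-to-nearest) mode and
 * packssdw supplies the signed 16-bit saturation; 3DNow!'s pf2id
 * truncates toward zero instead, hence the "not bit-exact" note
 * above. */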
2390

2391
#if HAVE_YASM
2392
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
2393
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
2394
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
2395
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *top, uint8_t *diff, int w, int *left, int *left_top);
2396
void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2397
void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
2398
void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2399
void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
2400
#if ARCH_X86_32
2401
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
2402
{
2403
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
2404
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
2405
}
2406
#endif
2407
void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2408
void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
2409
#else
2410
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
2411
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
2412
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
2413
#endif
2414
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
2415

2416
#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
2417
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
2418
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
2419
    DECLARE_ALIGNED_16(int16_t, tmp[len]);\
2420
    int i,j,c;\
2421
    for(c=0; c<channels; c++){\
2422
        float_to_int16_##cpu(tmp, src[c], len);\
2423
        for(i=0, j=c; i<len; i++, j+=channels)\
2424
            dst[j] = tmp[i];\
2425
    }\
2426
}\
2427
\
2428
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
2429
    if(channels==1)\
2430
        float_to_int16_##cpu(dst, src[0], len);\
2431
    else if(channels==2){\
2432
        x86_reg reglen = len; \
2433
        const float *src0 = src[0];\
2434
        const float *src1 = src[1];\
2435
        __asm__ volatile(\
2436
            "shl $2, %0 \n"\
2437
            "add %0, %1 \n"\
2438
            "add %0, %2 \n"\
2439
            "add %0, %3 \n"\
2440
            "neg %0 \n"\
2441
            body\
2442
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
2443
        );\
2444
    }else if(channels==6){\
2445
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
2446
    }else\
2447
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
2448
}
2449

2450
FLOAT_TO_INT16_INTERLEAVE(3dnow,
2451
    "1:                         \n"
2452
    "pf2id     (%2,%0), %%mm0   \n"
2453
    "pf2id    8(%2,%0), %%mm1   \n"
2454
    "pf2id     (%3,%0), %%mm2   \n"
2455
    "pf2id    8(%3,%0), %%mm3   \n"
2456
    "packssdw    %%mm1, %%mm0   \n"
2457
    "packssdw    %%mm3, %%mm2   \n"
2458
    "movq        %%mm0, %%mm1   \n"
2459
    "punpcklwd   %%mm2, %%mm0   \n"
2460
    "punpckhwd   %%mm2, %%mm1   \n"
2461
    "movq        %%mm0,  (%1,%0)\n"
2462
    "movq        %%mm1, 8(%1,%0)\n"
2463
    "add $16, %0                \n"
2464
    "js 1b                      \n"
2465
    "femms                      \n"
2466
)
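
/* pf2id truncates toward zero rather than rounding, which is presumably why
 * the 3DNow! converters are only installed when CODEC_FLAG_BITEXACT is unset
 * (see dsputil_init_mmx() below); femms is AMD's cheaper form of emms. */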

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "emms                       \n"
)
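
/* cvtps2pi rounds according to MXCSR (round-to-nearest by default) but
 * targets MMX registers, hence the trailing emms. */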

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)
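
/* The SSE2 body works entirely in XMM registers, so no emms is needed; the
 * movdqa store assumes dst is 16-byte aligned. */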

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}
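
/* 3DNow!ext only adds a dedicated 6-channel kernel; all other channel counts
 * reuse the plain 3DNow! implementation. */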
2504

    
2505

    
2506
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
2507
void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
2508
void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2509
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2510
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2511
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2512
void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2513
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);

static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                          \n\t"
        "movdqu   (%1,%2),   %%xmm0  \n\t"
        "movdqu 16(%1,%2),   %%xmm1  \n\t"
        "paddw    (%0,%2),   %%xmm0  \n\t"
        "paddw  16(%0,%2),   %%xmm1  \n\t"
        "movdqa   %%xmm0,    (%0,%2) \n\t"
        "movdqa   %%xmm1,  16(%0,%2) \n\t"
        "add      $32,       %2      \n\t"
        "js       1b                 \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}
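
/* Here and in sub_int16_sse2() below, v1 is accessed with aligned moves while
 * v2 uses movdqu, so v1 must be 16-byte aligned but v2 need not be.  Each
 * iteration consumes 32 bytes, i.e. order must be a multiple of 16 samples. */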

static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqa    (%0,%2),   %%xmm0  \n\t"
        "movdqa  16(%0,%2),   %%xmm2  \n\t"
        "movdqu    (%1,%2),   %%xmm1  \n\t"
        "movdqu  16(%1,%2),   %%xmm3  \n\t"
        "psubw     %%xmm1,    %%xmm0  \n\t"
        "psubw     %%xmm3,    %%xmm2  \n\t"
        "movdqa    %%xmm0,    (%0,%2) \n\t"
        "movdqa    %%xmm2,  16(%0,%2) \n\t"
        "add       $32,       %2      \n\t"
        "js        1b                 \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
{
    int res = 0;
    DECLARE_ALIGNED_16(xmm_reg, sh);
    x86_reg o = -(order << 1);

    v1 += order;
    v2 += order;
    sh.a = shift;
    __asm__ volatile(
        "pxor      %%xmm7,  %%xmm7        \n\t"
        "1:                               \n\t"
        "movdqu    (%0,%3), %%xmm0        \n\t"
        "movdqu  16(%0,%3), %%xmm1        \n\t"
        "pmaddwd   (%1,%3), %%xmm0        \n\t"
        "pmaddwd 16(%1,%3), %%xmm1        \n\t"
        "paddd     %%xmm0,  %%xmm7        \n\t"
        "paddd     %%xmm1,  %%xmm7        \n\t"
        "add       $32,     %3            \n\t"
        "js        1b                     \n\t"
        "movhlps   %%xmm7,  %%xmm2        \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "psrad     %4,      %%xmm7        \n\t"
        "pshuflw   $0x4E,   %%xmm7,%%xmm2 \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "movd      %%xmm7,  %2            \n\t"
        : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
        : "m"(sh)
    );
    return res;
}
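
/* Computes approximately (sum of v1[i]*v2[i]) >> shift: pmaddwd produces
 * pairwise dword sums that accumulate in xmm7, and the tail folds the four
 * dwords together (movhlps, then a pshuflw swap of the low dword pair).
 * Note that the arithmetic shift is applied to two partial sums before the
 * final add, and that the pmaddwd memory operands require v2 to be 16-byte
 * aligned. */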

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    /* avctx->dsp_mask lets the caller override CPU detection: with
     * FF_MM_FORCE set, the masked capability bits are forced on, otherwise
     * they are forced off. */
    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
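
    /* An MMX/SSE2 IDCT is only installed at full resolution (lowres == 0);
     * otherwise the defaults chosen elsewhere are left untouched. */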
    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER || CONFIG_THEORA_DECODER) &&
                     idct_algo==FF_IDCT_VP3){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if (mm_flags & FF_MM_SSE)
            c->clear_block = clear_block_sse;

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
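
        /* Half-pel table layout: the first index selects the block size
         * (0: 16x16, 1: 8x8) and the second the interpolation (0: copy,
         * 1: x half-pel, 2: y half-pel, 3: x+y half-pel), so e.g.
         * SET_HPEL_FUNCS(put, 0, 16, mmx) fills the four 16x16 put slots. */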

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (CONFIG_VP6_DECODER) {
            c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
        }

        if (mm_flags & FF_MM_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER || CONFIG_THEORA_DECODER) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }

#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
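
            /* Quarter-pel table layout: the slot index is x + 4*y over the
             * 16 fractional positions, so e.g. slot 6 holds mc21 (x=2, y=1). */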

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

#if HAVE_YASM
            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&FF_MM_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (CONFIG_VC1_DECODER || CONFIG_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
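
        /* H264_QPEL_FUNCS(x, y, CPU) fills the x + 4*y slot of both the
         * 16x16 and 8x8 put/avg H.264 quarter-pel tables. */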
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel;
            // the 3DNow! flag doubles as a cheap "this is an AMD CPU" test
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);

            if (CONFIG_VP6_DECODER) {
                c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
            }
        }
#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif

#if CONFIG_GPL && HAVE_YASM
        if (mm_flags & FF_MM_MMX2){
#if ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            if( mm_flags&FF_MM_SSE2 ){
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1100
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif
                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
        }
#endif

#if CONFIG_SNOW_DECODER
        if(mm_flags & FF_MM_SSE2 & 0){ /* the "& 0" keeps the SSE2 path disabled */
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#if HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & FF_MM_MMX2){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#if HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
            c->add_int16 = add_int16_sse2;
            c->sub_int16 = sub_int16_sse2;
            c->scalarproduct_int16 = scalarproduct_int16_sse2;
        }
    }
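
    /* Ordering above matters: later, more capable blocks overwrite earlier
     * function pointers so the best available version wins; the one
     * deliberate exception is vector_fmul_add_add, where 3DNow! is preferred
     * over SSE (see the comment there). */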

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}