ffmpeg / libavcodec / x86 / dsputil_mmx.c @ 2c67c659
/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h263.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED_16(const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_64 ) = 0x0040004000400040ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;

DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// For shared libraries (PIC) it is better to synthesize these constants in
// registers than to load them from memory.
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
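
/* How MOVQ_BONE/MOVQ_WTWO build their constants without a memory load:
 * pcmpeqd sets every bit, so each 16-bit word is 0xFFFF; psrlw $15 leaves
 * 0x0001 in every word.  packuswb then narrows the words to bytes of 0x01
 * (== ff_bone), while psllw $1 instead doubles the words to 0x0002
 * (== ff_wtwo). */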

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
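
/* Note: these aliases work because a plain (non-interpolating) copy never
 * averages two pixels, so the rounding and no-rounding variants are bit
 * identical, and pavgb/pavgusb buys nothing over a straight movq copy. */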

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm__ volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // If this block were an exact copy of the one above, the compiler would
    // generate some very strange code, thus we use an "r" constraint for p here.
    __asm__ volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}

static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int i;

    movq_m2r(*vector128, mm1);
    for (i = 0; i < 8; i++) {
        movq_m2r(*(block), mm0);
        packsswb_m2r(*(block + 4), mm0);
        block += 8;
        paddb_r2r(mm1, mm0);
        movq_r2m(mm0, *pixels);
        pixels += line_size;
    }
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
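
/* Note: both SSE2 routines above use unaligned loads (movdqu) for the source
 * but aligned stores (movdqa) for the destination, so 'block' is assumed to
 * be 16-byte aligned while 'pixels' may be unaligned. */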

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7              \n\t"\
                "mov     %1, %%"REG_a"          \n\t"\
                "1:                             \n\t"\
                "movq %%mm7, (%0, %%"REG_a")    \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
                "add $32, %%"REG_a"             \n\t"\
                " js 1b                         \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
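
/* CLEAR_BLOCKS uses the usual negative-index loop idiom: the pointer is
 * biased past the end of the blocks, REG_a counts up from -128*n toward 0,
 * and "js" keeps iterating while the index is negative, so no separate
 * compare instruction is needed. */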

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}
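
/* add_bytes_mmx processes 16 bytes per asm iteration while i < w-15; the
 * trailing scalar loop then picks up the remaining bytes, so 'w' needs no
 * particular alignment.  add_bytes_l2_mmx below follows the same pattern. */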

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}
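
/* transpose4x4 keeps the whole 4x4 byte transpose in MMX registers: two
 * punpcklbw steps interleave row pairs into 16-bit units, punpcklwd/punpckhwd
 * interleave those into 32-bit columns, and each column is stored with movd
 * (using punpckhdq to reach the upper halves). */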

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(ENABLE_ANY_H263) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp[4]);
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height;
   this MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}
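
/* In draw_edges_mmx the left/right pass splats the first and last pixel of
 * every line into the margins via the punpck chains, and the top/bottom loops
 * then copy the first and last lines outward.  Because those lines were
 * already widened horizontally, the corner regions are filled as well. */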

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
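
/* Scalar reference for the PNG Paeth predictor that PAETH vectorizes
 * (a sketch, not part of the build):
 *     p  = left + top - topleft;
 *     pa = abs(p - left); pb = abs(p - top); pc = abs(p - topleft);
 *     pred = (pa <= pb && pa <= pc) ? left : (pb <= pc) ? top : topleft;
 *     dst[i] = src[i] + pred;                // modulo 256
 * The asm computes pa/pb/pc on four 16-bit lanes at once and replaces the
 * branches with pcmpgtw masks. */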

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#ifdef HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)
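
/* The qpel lowpass code below evaluates the MPEG-4 half-pel 8-tap filter
 * (-1, 3, -6, 20, 20, -6, 3, -1): with the symmetric tap pairs pre-summed as
 * x1..x4, each output is (20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5, exactly
 * as the per-instruction comments in QPEL_V_LOW trace. */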

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}

#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
                \
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
         \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
   );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}
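
/* Reader's note (not in the original): the OP parameter of QPEL_BASE and
 * QPEL_OP selects how filtered pixels reach the destination. PUT_OP is a
 * plain mov; the AVG variants below load the destination, average it with
 * the freshly filtered pixels (pavgusb on 3DNow!, pavgb on MMX2) and store
 * the result back, which is what the avg_* motion compensation entry
 * points require. */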

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
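
/* Illustrative only: each QPEL_OP expansion above emits the full set of
 * sixteen quarter-pel positions (mc00..mc33) for both 8x8 and 16x16
 * blocks of one CPU flavour, e.g. put_qpel8_mc32_mmx2() or
 * avg_qpel16_mc00_3dnow(), all sharing the common signature
 * (uint8_t *dst, uint8_t *src, int stride). */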

/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)
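
/* Reader's note (not in the original): this "fast" mode trades compliance
 * for speed by approximating the 8-tap qpel filter with simple averages:
 * the half-pel positions reuse the existing pixels*_x2/_y2/_xy2 averaging
 * kernels, and the remaining quarter-pel positions are built by the
 * *_l3_* helpers from the source pixel and the neighbours selected by the
 * offset arguments passed in above. */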

#if 0
static void just_return() { return; }
#endif

static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );
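
    /* Reader's note (not in the original): mm6 now holds s = 1<<shift
     * broadcast to four words and mm7 is zero. Per the comments in the
     * loop below, each output pixel is the bilinear blend
     *   dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
     *        + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift)
     * computed four pixels at a time, with the subpel fractions dx/dy
     * advanced incrementally by dxy4/dyy4 on every row. */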

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq   %0,  %%mm4 \n\t"
                "movq   %1,  %%mm5 \n\t"
                "paddw  %2,  %%mm4 \n\t"
                "paddw  %3,  %%mm5 \n\t"
                "movq   %%mm4, %0  \n\t"
                "movq   %%mm5, %1  \n\t"
                "psrlw  $12, %%mm4 \n\t"
                "psrlw  $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq   %%mm6, %%mm2 \n\t"
                "movq   %%mm6, %%mm1 \n\t"
                "psubw  %%mm4, %%mm2 \n\t"
                "psubw  %%mm5, %%mm1 \n\t"
                "movq   %%mm2, %%mm0 \n\t"
                "movq   %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd   %4,    %%mm5 \n\t"
                "movd   %3,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd   %2,    %%mm5 \n\t"
                "movd   %1,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw  %5,    %%mm1 \n\t"
                "paddw  %%mm3, %%mm2 \n\t"
                "paddw  %%mm1, %%mm0 \n\t"
                "paddw  %%mm2, %%mm0 \n\t"

                "psrlw    %6,    %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd     %%mm0, %0    \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH
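
/* Reader's note (not in the original): both expansions issue one software
 * prefetch hint per row of the block about to be read - prefetcht0 on
 * MMX2-class CPUs and the 3DNow! prefetch instruction on AMD parts - so
 * the motion compensation loops that follow find the source pixels
 * already in cache. The hints are advisory and do not fault. */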

#include "h264dsp_mmx.c"

/* CAVS specific */
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);

void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);

void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}

/* external functions, from idct_mmx.c */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted */
#ifdef CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq    %0,    %%mm0 \n\t"
            "movq    %1,    %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
            "movaps  %0,     %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps  %0,     %%xmm0 \n\t"
            "movaps  %1,     %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}
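
/* Reader's note (not in the original): both versions implement the Vorbis
 * magnitude/angle decoupling without branches. The comparison masks
 * select, per element, whether the angle value (with the sign of the
 * magnitude folded in via the xor with the isolated sign bit) is added to
 * or subtracted from the magnitude, producing both output channels in a
 * single pass; see the per-instruction comments above for the exact
 * formulas. */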

#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
 stereo("addps         %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
   mono("addps         %%xmm2, %%xmm0 \n")\
        "movaps  %%xmm0,      (%0,%1) \n"\
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps  (%3,%0), %%xmm0 \n"\
 stereo("movaps   %%xmm0, %%xmm1 \n")\
        "mulps    %%xmm6, %%xmm0 \n"\
 stereo("mulps    %%xmm7, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps   (%1),   %%xmm2 \n"\
 stereo("movaps   %%xmm2, %%xmm3 \n")\
        "mulps   (%4,%2), %%xmm2 \n"\
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps    %%xmm2, %%xmm0 \n"\
 stereo("addps    %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps   %%xmm0,     (%3,%0) \n"\
 stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );
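
/* Reader's note (not in the original): MIX5 is a fast path for the common
 * 5-channel-to-stereo and 5-channel-to-mono downmixes. It relies on the
 * matrix symmetries checked in ac3_downmix_sse below, so only three scalar
 * coefficients need to be broadcast; the IF0/IF1 wrappers compile the
 * mono-only or stereo-only instructions in or out. MIX_MISC is the generic
 * any-layout fallback that walks a SIMD copy of the whole matrix. */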

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm6 \n"
            "movss    4(%2,%0), %%xmm7 \n"
            "shufps $0, %%xmm6, %%xmm6 \n"
            "shufps $0, %%xmm7, %%xmm7 \n"
            "movaps %%xmm6,   (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%1,%0), %%mm0 \n\t"
        "movq   8(%1,%0), %%mm1 \n\t"
        "pfmul   (%2,%0), %%mm0 \n\t"
        "pfmul  8(%2,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge 1b \n\t"
        "femms  \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}
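
/* Reader's note (not in the original): the _reverse variants compute
 * dst[i] = src0[i] * src1[len-1-i]. src1 is walked forwards while dst and
 * src0 are walked backwards, and each load from src1 is reversed
 * in-register: shufps $0x1b (element order 3,2,1,0) flips an xmm register
 * end-for-end, and pswapd does the same for the two floats in an mm
 * register. */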

static void vector_fmul_add_add_3dnow(float *dst, const float *src0, const float *src1,
                                      const float *src2, int src3, int len, int step){
    x86_reg i = (len-4)*4;
    if(step == 2 && src3 == 0){
        dst += (len-4)*2;
        __asm__ volatile(
            "1: \n\t"
            "movq   (%2,%0),  %%mm0 \n\t"
            "movq  8(%2,%0),  %%mm1 \n\t"
            "pfmul  (%3,%0),  %%mm0 \n\t"
            "pfmul 8(%3,%0),  %%mm1 \n\t"
            "pfadd  (%4,%0),  %%mm0 \n\t"
            "pfadd 8(%4,%0),  %%mm1 \n\t"
            "movd     %%mm0,   (%1) \n\t"
            "movd     %%mm1, 16(%1) \n\t"
            "psrlq      $32,  %%mm0 \n\t"
            "psrlq      $32,  %%mm1 \n\t"
            "movd     %%mm0,  8(%1) \n\t"
            "movd     %%mm1, 24(%1) \n\t"
            "sub  $32, %1 \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movq    (%2,%0), %%mm0 \n\t"
            "movq   8(%2,%0), %%mm1 \n\t"
            "pfmul   (%3,%0), %%mm0 \n\t"
            "pfmul  8(%3,%0), %%mm1 \n\t"
            "pfadd   (%4,%0), %%mm0 \n\t"
            "pfadd  8(%4,%0), %%mm1 \n\t"
            "movq  %%mm0,   (%1,%0) \n\t"
            "movq  %%mm1,  8(%1,%0) \n\t"
            "sub  $16, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
    __asm__ volatile("femms");
}
static void vector_fmul_add_add_sse(float *dst, const float *src0, const float *src1,
                                    const float *src2, int src3, int len, int step){
    x86_reg i = (len-8)*4;
    if(step == 2 && src3 == 0){
        dst += (len-8)*2;
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movss     %%xmm0,   (%1) \n\t"
            "movss     %%xmm1, 32(%1) \n\t"
            "movhlps   %%xmm0, %%xmm2 \n\t"
            "movhlps   %%xmm1, %%xmm3 \n\t"
            "movss     %%xmm2, 16(%1) \n\t"
            "movss     %%xmm3, 48(%1) \n\t"
            "shufps $0xb1, %%xmm0, %%xmm0 \n\t"
            "shufps $0xb1, %%xmm1, %%xmm1 \n\t"
            "movss     %%xmm0,  8(%1) \n\t"
            "movss     %%xmm1, 40(%1) \n\t"
            "movhlps   %%xmm0, %%xmm2 \n\t"
            "movhlps   %%xmm1, %%xmm3 \n\t"
            "movss     %%xmm2, 24(%1) \n\t"
            "movss     %%xmm3, 56(%1) \n\t"
            "sub  $64, %1 \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i), "+r"(dst)
            :"r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else if(step == 1 && src3 == 0){
        __asm__ volatile(
            "1: \n\t"
            "movaps   (%2,%0), %%xmm0 \n\t"
            "movaps 16(%2,%0), %%xmm1 \n\t"
            "mulps    (%3,%0), %%xmm0 \n\t"
            "mulps  16(%3,%0), %%xmm1 \n\t"
            "addps    (%4,%0), %%xmm0 \n\t"
            "addps  16(%4,%0), %%xmm1 \n\t"
            "movaps %%xmm0,   (%1,%0) \n\t"
            "movaps %%xmm1, 16(%1,%0) \n\t"
            "sub  $32, %0 \n\t"
            "jge  1b \n\t"
            :"+r"(i)
            :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
            :"memory"
        );
    }
    else
        ff_vector_fmul_add_add_c(dst, src0, src1, src2, src3, len, step);
}
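
/* Reader's note (not in the original): both versions specialise the two
 * layouts that matter in practice - contiguous output (step == 1) and
 * interleaved output (step == 2, scattered one float at a time with
 * movd/movss stores) - and only when the src3 bias is zero; every other
 * combination falls back to the C reference ff_vector_fmul_add_add_c(). */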

static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#ifdef HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#ifdef HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps       (%5,%1), %%xmm1 \n"
            "movaps       (%5,%0), %%xmm0 \n"
            "movaps       (%4,%1), %%xmm5 \n"
            "movaps       (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps        %%xmm0, %%xmm2 \n"
            "movaps        %%xmm1, %%xmm3 \n"
            "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
            "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
            "addps         %%xmm3, %%xmm2 \n"
            "subps         %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps        %%xmm1, (%2,%0) \n"
            "movaps        %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}
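
/* Reader's note (not in the original): this is the windowed overlap-add
 * used at transform block boundaries. With the pointers pre-biased by len
 * as in the constraint lists above, each iteration combines one run from
 * each end of the window; per the asm comments it produces
 *   dst[len+i] = src0[len+i]*win[len+j] - src1[j]*win[len+i]
 *   dst[len+j] = src0[len+i]*win[len+i] + src1[j]*win[len+j]
 * with i running forward, j running backward, and the second result
 * written reversed via pswapd/shufps. Only the add_bias == 0 case is
 * vectorised; otherwise the C fallback applies the bias. */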

static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}
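
/* Reader's note (not in the original): both compute dst[i] = src[i] * mul
 * with the scalar broadcast once via shufps $0. Plain SSE has no packed
 * int32-to-float conversion into an xmm register, so the SSE version
 * converts two 64-bit halves with cvtpi2ps and merges them with movlhps,
 * while SSE2 converts four ints at once with cvtdq2ps. */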

static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
        "packssdw   %%xmm1      , %%xmm0    \n\t"
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
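
/* Reader's note (not in the original): all three variants use the common
 * negative-index idiom: the pointers are advanced to the end of the
 * buffers, the byte count is negated, and the loop runs it up towards
 * zero so a single add/js pair both steps and tests. cvtps2pi/cvtps2dq
 * round to int32 and packssdw saturates the results to int16. */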

#ifdef HAVE_YASM
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
#ifdef ARCH_X86_32
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#endif
void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
#else
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse

#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED_16(int16_t, tmp[len]);\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}
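
/* Reader's note (not in the original): the macro generates one dispatcher
 * per CPU flavour. Mono is a plain conversion, stereo inlines the
 * per-flavour asm "body" (which converts one quad of floats from each
 * channel and interleaves the int16 results with punpcklwd/punpckhwd or
 * movhlps), six channels go to the yasm helpers when available, and any
 * other layout converts channel by channel into a temporary and scatters
 * it in C. */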
2383

    
2384
FLOAT_TO_INT16_INTERLEAVE(3dnow,
2385
    "1:                         \n"
2386
    "pf2id     (%2,%0), %%mm0   \n"
2387
    "pf2id    8(%2,%0), %%mm1   \n"
2388
    "pf2id     (%3,%0), %%mm2   \n"
2389
    "pf2id    8(%3,%0), %%mm3   \n"
2390
    "packssdw    %%mm1, %%mm0   \n"
2391
    "packssdw    %%mm3, %%mm2   \n"
2392
    "movq        %%mm0, %%mm1   \n"
2393
    "punpcklwd   %%mm2, %%mm0   \n"
2394
    "punpckhwd   %%mm2, %%mm1   \n"
2395
    "movq        %%mm0,  (%1,%0)\n"
2396
    "movq        %%mm1, 8(%1,%0)\n"
2397
    "add $16, %0                \n"
2398
    "js 1b                      \n"
2399
    "femms                      \n"
2400
)
2401

    
2402
FLOAT_TO_INT16_INTERLEAVE(sse,
2403
    "1:                         \n"
2404
    "cvtps2pi  (%2,%0), %%mm0   \n"
2405
    "cvtps2pi 8(%2,%0), %%mm1   \n"
2406
    "cvtps2pi  (%3,%0), %%mm2   \n"
2407
    "cvtps2pi 8(%3,%0), %%mm3   \n"
2408
    "packssdw    %%mm1, %%mm0   \n"
2409
    "packssdw    %%mm3, %%mm2   \n"
2410
    "movq        %%mm0, %%mm1   \n"
2411
    "punpcklwd   %%mm2, %%mm0   \n"
2412
    "punpckhwd   %%mm2, %%mm1   \n"
2413
    "movq        %%mm0,  (%1,%0)\n"
2414
    "movq        %%mm1, 8(%1,%0)\n"
2415
    "add $16, %0                \n"
2416
    "js 1b                      \n"
2417
    "emms                       \n"
2418
)
2419

    
2420
FLOAT_TO_INT16_INTERLEAVE(sse2,
2421
    "1:                         \n"
2422
    "cvtps2dq  (%2,%0), %%xmm0  \n"
2423
    "cvtps2dq  (%3,%0), %%xmm1  \n"
2424
    "packssdw   %%xmm1, %%xmm0  \n"
2425
    "movhlps    %%xmm0, %%xmm1  \n"
2426
    "punpcklwd  %%xmm1, %%xmm0  \n"
2427
    "movdqa     %%xmm0, (%1,%0) \n"
2428
    "add $16, %0                \n"
2429
    "js 1b                      \n"
2430
)
2431

    
2432
static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
2433
    if(channels==6)
2434
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
2435
    else
2436
        float_to_int16_interleave_3dnow(dst, src, len, channels);
2437
}
2438

    
2439

    
2440
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width);
2441
void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width);
2442
void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2443
void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
2444
void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2445
                                   int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2446
void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2447
                                  int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2448

    
2449

    
2450
static void add_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                          \n\t"
        "movdqu   (%1,%2),   %%xmm0  \n\t"
        "movdqu 16(%1,%2),   %%xmm1  \n\t"
        "paddw    (%0,%2),   %%xmm0  \n\t"
        "paddw  16(%0,%2),   %%xmm1  \n\t"
        "movdqa   %%xmm0,    (%0,%2) \n\t"
        "movdqa   %%xmm1,  16(%0,%2) \n\t"
        "add      $32,       %2      \n\t"
        "js       1b                 \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

static void sub_int16_sse2(int16_t * v1, int16_t * v2, int order)
{
    x86_reg o = -(order << 1);
    v1 += order;
    v2 += order;
    __asm__ volatile(
        "1:                           \n\t"
        "movdqa    (%0,%2),   %%xmm0  \n\t"
        "movdqa  16(%0,%2),   %%xmm2  \n\t"
        "movdqu    (%1,%2),   %%xmm1  \n\t"
        "movdqu  16(%1,%2),   %%xmm3  \n\t"
        "psubw     %%xmm1,    %%xmm0  \n\t"
        "psubw     %%xmm3,    %%xmm2  \n\t"
        "movdqa    %%xmm0,    (%0,%2) \n\t"
        "movdqa    %%xmm2,  16(%0,%2) \n\t"
        "add       $32,       %2      \n\t"
        "js        1b                 \n\t"
        : "+r"(v1), "+r"(v2), "+r"(o)
    );
}

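/* Dot product of two int16 vectors: pmaddwd multiplies int16 pairs and sums
 * adjacent products into dwords, accumulated in xmm7 (the pmaddwd memory
 * operands assume v2 is 16-byte aligned). The tail reduces the register in
 * halves: movhlps folds 4 dwords to 2, then pshuflw $0x4E swaps the low two
 * dwords for the final paddd, with the arithmetic right shift applied
 * between the two folds (so each partial sum is shifted before the last
 * add, which can differ from (sum >> shift) by one). */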
static int32_t scalarproduct_int16_sse2(int16_t * v1, int16_t * v2, int order, int shift)
{
    int res = 0;
    DECLARE_ALIGNED_16(int64_t, sh);
    x86_reg o = -(order << 1);

    v1 += order;
    v2 += order;
    sh = shift;
    __asm__ volatile(
        "pxor      %%xmm7,  %%xmm7        \n\t"
        "1:                               \n\t"
        "movdqu    (%0,%3), %%xmm0        \n\t"
        "movdqu  16(%0,%3), %%xmm1        \n\t"
        "pmaddwd   (%1,%3), %%xmm0        \n\t"
        "pmaddwd 16(%1,%3), %%xmm1        \n\t"
        "paddd     %%xmm0,  %%xmm7        \n\t"
        "paddd     %%xmm1,  %%xmm7        \n\t"
        "add       $32,     %3            \n\t"
        "js        1b                     \n\t"
        "movhlps   %%xmm7,  %%xmm2        \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "psrad     %4,      %%xmm7        \n\t"
        "pshuflw   $0x4E,   %%xmm7,%%xmm2 \n\t"
        "paddd     %%xmm2,  %%xmm7        \n\t"
        "movd      %%xmm7,  %2            \n\t"
        : "+r"(v1), "+r"(v2), "=r"(res), "+r"(o)
        : "m"(sh)
    );
    return res;
}

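/* Runtime dispatch: query the CPU features once, apply any avctx->dsp_mask
 * override, then replace the generic DSPContext function pointers with the
 * fastest versions the host supports. Later, more specific blocks (MMXEXT,
 * SSE2, SSSE3) overwrite the pointers set by earlier ones. */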
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMXEXT)
        av_log(avctx, AV_LOG_INFO, " mmxext");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

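        /* IDCT selection (full-resolution decoding only): implementations
         * expect coefficients in different orders, so each choice also sets
         * idct_permutation_type to tell the caller how to permute them. */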
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#ifdef CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMXEXT){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((ENABLE_VP3_DECODER || ENABLE_VP5_DECODER || ENABLE_VP6_DECODER || ENABLE_THEORA_DECODER) &&
                     idct_algo==FF_IDCT_VP3){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMXEXT){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if (mm_flags & FF_MM_SSE)
            c->clear_block = clear_block_sse;

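/* Half-pel tables: IDX selects the block width (0 = 16 pixels, 1 = 8) and
 * the second index the half-pel position (0 = integer, 1 = x half,
 * 2 = y half, 3 = x+y half), matching the _x2/_y2/_xy2 suffixes. */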
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (ENABLE_ANY_H263) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_nornd;

        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

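        /* MMXEXT (MMX2) adds pavgb/pshufw and related instructions, which
         * enable the rounded-average motion-compensation variants and the
         * full H.264 qpel/chroma/weight/deblock set below. */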
        if (mm_flags & FF_MM_MMXEXT) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (ENABLE_VP3_DECODER || ENABLE_THEORA_DECODER) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }

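/* Quarter-pel tables: index = x + 4*y encodes the quarter-pel position,
 * matching the _mcXY_ suffix of each generated function name. */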
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (ENABLE_VC1_DECODER || ENABLE_WMV3_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            if (ENABLE_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

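/* H.264 qpel overrides, same x + 4*y indexing: the SSE2/SSSE3 blocks below
 * replace only the positions their implementations cover, leaving the
 * MMX2/3DNow! versions installed for the rest. */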
#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
/* FIXME works in most codecs, but crashes svq1 due to unaligned chroma
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
*/
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;

            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#ifdef HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
        }
#endif

#if defined(CONFIG_GPL) && defined(HAVE_YASM)
        if( mm_flags&FF_MM_MMXEXT ){
#ifdef ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
#if defined(ARCH_X86_64) || !defined(__ICC) || __ICC > 1100
            if( mm_flags&FF_MM_SSE2 ){
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
            }
#endif
        }
#endif

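/* Note the "& 0" below: the condition is always false, so the SSE2 snow
 * wavelet path is deliberately left disabled and the MMX versions are used
 * instead (presumably pending a fix). */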
#ifdef CONFIG_SNOW_DECODER
        if(mm_flags & FF_MM_SSE2 & 0){
            c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
#ifdef HAVE_7REGS
            c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
#endif
            c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
        }
        else{
            if(mm_flags & FF_MM_MMXEXT){
                c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
#ifdef HAVE_7REGS
                c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
#endif
            }
            c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
        }
#endif

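        /* Float DSP: the 3DNow! float_to_int16 paths truncate rather than
         * round, so they are only installed when bit-exact output was not
         * requested; the SSE/SSE2 assignments below override them where the
         * CPU allows. */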
        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add_add = vector_fmul_add_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add_add = vector_fmul_add_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
            c->add_int16 = add_int16_sse2;
            c->sub_int16 = sub_int16_sse2;
            c->scalarproduct_int16 = scalarproduct_int16_sse2;
        }
    }

    if (ENABLE_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}