/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/cpu.h"
#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "libavcodec/ac3dec.h"
#include "dsputil_mmx.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_1  ) = 0x0001000100010001ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_3  ) = {0x0003000300030003ULL, 0x0003000300030003ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_4  ) = {0x0004000400040004ULL, 0x0004000400040004ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_9  ) = {0x0009000900090009ULL, 0x0009000900090009ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_17 ) = {0x0011001100110011ULL, 0x0011001100110011ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_18 ) = {0x0012001200120012ULL, 0x0012001200120012ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_27 ) = {0x001B001B001B001BULL, 0x001B001B001B001BULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_53 ) = 0x0035003500350035ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_63 ) = {0x003F003F003F003FULL, 0x003F003F003F003FULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_0  ) = {0x0000000000000000ULL, 0x0000000000000000ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_1  ) = {0x0101010101010101ULL, 0x0101010101010101ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_A1 ) = {0xA1A1A1A1A1A1A1A1ULL, 0xA1A1A1A1A1A1A1A1ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (".p2align 3"::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to access constants this way
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif
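
/* Both PIC variants synthesize their constant entirely in registers, so no
 * memory load (and hence no relocation) is needed.  A scalar sketch of the
 * per-lane arithmetic (editor's illustration, not part of the build):
 *
 *     uint16_t lane = 0xFFFF; // pcmpeqd: all bits set
 *     lane >>= 15;            // psrlw $15: each 16-bit lane is now 0x0001
 *     // MOVQ_WTWO: psllw $1  -> every word becomes 0x0002
 *     // MOVQ_BONE: packuswb  -> every byte becomes 0x01
 */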

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in MMX2 set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
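
/* A plain put is an exact copy, so there is nothing to round and nothing
 * for MMX2/3DNow! averaging instructions to accelerate; the no-rnd and
 * per-CPU put variants can therefore all alias the plain MMX versions. */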

/***********************************/
/* standard MMX */

void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm__ volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // If this were an exact copy of the code above, the compiler would
    // generate some very strange code, hence the "r" constraint.
    __asm__ volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}
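
/* Scalar sketch of the above: each 16-bit coefficient is packed down to
 * a pixel with unsigned saturation (what packuswb does), i.e. roughly
 *
 *     for (i = 0; i < 8; i++)
 *         for (j = 0; j < 8; j++)
 *             pixels[i*line_size + j] = av_clip_uint8(block[i*8 + j]);
 */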

DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
            "movq    "#off"(%2), %%mm1          \n\t"\
            "movq 16+"#off"(%2), %%mm2          \n\t"\
            "movq 32+"#off"(%2), %%mm3          \n\t"\
            "movq 48+"#off"(%2), %%mm4          \n\t"\
            "packsswb  8+"#off"(%2), %%mm1      \n\t"\
            "packsswb 24+"#off"(%2), %%mm2      \n\t"\
            "packsswb 40+"#off"(%2), %%mm3      \n\t"\
            "packsswb 56+"#off"(%2), %%mm4      \n\t"\
            "paddb %%mm0, %%mm1                 \n\t"\
            "paddb %%mm0, %%mm2                 \n\t"\
            "paddb %%mm0, %%mm3                 \n\t"\
            "paddb %%mm0, %%mm4                 \n\t"\
            "movq %%mm1, (%0)                   \n\t"\
            "movq %%mm2, (%0, %3)               \n\t"\
            "movq %%mm3, (%0, %3, 2)            \n\t"\
            "movq %%mm4, (%0, %1)               \n\t"

void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
            "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
            "lea (%3, %3, 2), %1                \n\t"
            put_signed_pixels_clamped_mmx_half(0)
            "lea (%0, %3, 4), %0                \n\t"
            put_signed_pixels_clamped_mmx_half(64)
            :"+&r" (pixels), "=&r" (line_skip3)
            :"r" (block), "r"(line_skip)
            :"memory");
}
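
/* packsswb saturates each coefficient to [-128,127] and the paddb of the
 * 0x80 bytes from ff_vector128 then biases that into [0,255]; per pixel
 * this is equivalent to
 *
 *     pixels[y*line_size + x] = av_clip_uint8(block[y*8 + x] + 128);
 */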

void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}
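
/* Per-pixel effect of the above (scalar sketch): widen each byte, add
 * the coefficient, repack with unsigned saturation, i.e.
 *
 *     pixels[n] = av_clip_uint8(pixels[n] + block[n]);
 */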

static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ".p2align 3                    \n\t"
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}
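
/* Alignment contract of the two SSE2 routines above: the source rows are
 * loaded with movdqu (may be unaligned) while the destination uses movdqa
 * (must be 16-byte aligned).  pavgb is the rounding byte average, i.e.
 *
 *     block[n] = (block[n] + pixels[n] + 1) >> 1;
 */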

#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7              \n\t"\
                "mov     %1, %%"REG_a"          \n\t"\
                "1:                             \n\t"\
                "movq %%mm7, (%0, %%"REG_a")    \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
                "add $32, %%"REG_a"             \n\t"\
                " js 1b                         \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)
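
/* The loop above indexes from the end of the buffer with a negative
 * offset so that one add serves as both pointer advance and loop test:
 * REG_a runs from -128*n to 0, clearing 32 bytes per iteration, and "js"
 * exits once the sign bit clears.  With 16-bit DCTELEMs this is simply
 *
 *     memset(blocks, 0, n * 64 * sizeof(DCTELEM));
 */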

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "mov     %1, %%"REG_a"  \n"
        "1:                     \n"
        "movaps %%xmm0,    (%0, %%"REG_a") \n"
        "movaps %%xmm0,  16(%0, %%"REG_a") \n"
        "movaps %%xmm0,  32(%0, %%"REG_a") \n"
        "movaps %%xmm0,  48(%0, %%"REG_a") \n"
        "movaps %%xmm0,  64(%0, %%"REG_a") \n"
        "movaps %%xmm0,  80(%0, %%"REG_a") \n"
        "movaps %%xmm0,  96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a"    \n"
        " js 1b                 \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3 \n"
        "1: \n"
        "movzbl (%3,%4), %2 \n"
        "mov    %2, %k3 \n"
        "sub   %b1, %b3 \n"
        "add   %b0, %b3 \n"
        "mov    %2, %1 \n"
        "cmp    %0, %2 \n"
        "cmovg  %0, %2 \n"
        "cmovg  %1, %0 \n"
        "cmp   %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov    %7, %3 \n"
        "cmp    %2, %0 \n"
        "cmovl  %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif
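
/* The cmov chain above is the HuffYUV median predictor; one step in
 * scalar form (mid_pred() being the median-of-3 from mathops.h):
 *
 *     int pred = mid_pred(l, t, l + t - tl);
 *     dst[i]   = diff[i] + pred;
 *     tl = t;  l = dst[i];
 */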

#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height;
   this MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}
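
/* Scalar reference of the PNG Paeth predictor that the macro vectorizes
 * (per the PNG spec; a, b, c = left, above, upper-left):
 *
 *     p  = a + b - c;
 *     pa = abs(p - a); pb = abs(p - b); pc = abs(p - c);
 *     pred   = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;
 *     dst[i] = src[i] + pred; // stored modulo 256, hence the ff_pw_255 mask
 */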

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)
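
/* QPEL_V_LOW is one step of the MPEG-4 quarter-pel half-sample filter
 * with kernel (-1, 3, -6, 20, 20, -6, 3, -1) / 32; in scalar form, with
 * x1..x4 the symmetric source pairs named in the comments above:
 *
 *     out = av_clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rounder) >> 5);
 */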

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %5, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
        "paddw %%mm5, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm2               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
        "paddw %%mm6, %%mm3               \n\t" /* c */\
        "paddw %%mm5, %%mm4               \n\t" /* d */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %5, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "packuswb %%mm3, %%mm0            \n\t"\
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+d"(h)\
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[8];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
        __asm__ volatile(\
            "movq (%0), %%mm0           \n\t"\
            "movq 8(%0), %%mm1          \n\t"\
            "paddw %2, %%mm0            \n\t"\
            "paddw %2, %%mm1            \n\t"\
            "psraw $5, %%mm0            \n\t"\
            "psraw $5, %%mm1            \n\t"\
            "packuswb %%mm1, %%mm0      \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            :"memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}
1232

    
1233
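/* Note: the lowpass above is the MPEG-4 quarter-sample 8-tap FIR
 * (-1, 3, -6, 20, 20, -6, 3, -1) with mirrored edge samples; each output is
 * (20a - 6b + 3c - d + ROUNDER) >> 5, exactly as spelled out by the int16_t
 * fallback rows in the 3dnow variant. */
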
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
                \
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
         \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
   );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

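/* PUT_OP stores the filtered result; the AVG ops average it with what is
 * already in the destination.  Both pavgb (MMX2) and pavgusb (3DNow!)
 * compute (a + b + 1) >> 1 per byte, i.e. they round halfway cases up. */
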
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

/***********************************/
/* bilinear qpel: not compliant to any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

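/* Each "quarter-pel" position here is approximated either by the nearest
 * half-pel interpolation (the hpel _x2/_y2/_xy2 put/avg routines) or by a
 * 3-point weighted average (_l3), trading the 8-tap filter above for speed.
 * mc21 and mc12 simply alias mc20/mc02 via function pointers. */
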
QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)


#if 0
static void just_return(void) { return; }
#endif

static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq   %0,  %%mm4 \n\t"
                "movq   %1,  %%mm5 \n\t"
                "paddw  %2,  %%mm4 \n\t"
                "paddw  %3,  %%mm5 \n\t"
                "movq   %%mm4, %0  \n\t"
                "movq   %%mm5, %1  \n\t"
                "psrlw  $12, %%mm4 \n\t"
                "psrlw  $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq   %%mm6, %%mm2 \n\t"
                "movq   %%mm6, %%mm1 \n\t"
                "psubw  %%mm4, %%mm2 \n\t"
                "psubw  %%mm5, %%mm1 \n\t"
                "movq   %%mm2, %%mm0 \n\t"
                "movq   %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd   %4,    %%mm5 \n\t"
                "movd   %3,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd   %2,    %%mm5 \n\t"
                "movd   %1,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw  %5,    %%mm1 \n\t"
                "paddw  %%mm3, %%mm2 \n\t"
                "paddw  %%mm1, %%mm0 \n\t"
                "paddw  %%mm2, %%mm0 \n\t"

                "psrlw    %6,    %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd     %%mm0, %0    \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

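/* The inner loop above is plain bilinear blending with per-pixel weights:
 * dst = (src[0,0]*(s-dx)*(s-dy) + src[1,0]*dx*(s-dy)
 *      + src[0,1]*(s-dx)*dy     + src[1,1]*dx*dy + r) >> (2*shift),
 * with s = 1<<shift; the fractional positions in dx4/dy4 are updated
 * incrementally once per row. */
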
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

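/* One prefetch is issued per row: prefetcht0 (SSE) pulls the line into the
 * whole cache hierarchy, while "prefetch" is the AMD 3DNow! equivalent.
 * Prefetch instructions are hints and cannot fault, so no bounds checking
 * is required here. */
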
#include "h264_qpel_mmx.c"
1798

    
1799
void ff_put_h264_chroma_mc8_mmx_rnd   (uint8_t *dst, uint8_t *src,
1800
                                       int stride, int h, int x, int y);
1801
void ff_put_vc1_chroma_mc8_mmx_nornd  (uint8_t *dst, uint8_t *src,
1802
                                       int stride, int h, int x, int y);
1803
void ff_put_rv40_chroma_mc8_mmx       (uint8_t *dst, uint8_t *src,
1804
                                       int stride, int h, int x, int y);
1805
void ff_avg_h264_chroma_mc8_mmx2_rnd  (uint8_t *dst, uint8_t *src,
1806
                                       int stride, int h, int x, int y);
1807
void ff_avg_vc1_chroma_mc8_mmx2_nornd (uint8_t *dst, uint8_t *src,
1808
                                       int stride, int h, int x, int y);
1809
void ff_avg_rv40_chroma_mc8_mmx2      (uint8_t *dst, uint8_t *src,
1810
                                       int stride, int h, int x, int y);
1811
void ff_avg_h264_chroma_mc8_3dnow_rnd (uint8_t *dst, uint8_t *src,
1812
                                       int stride, int h, int x, int y);
1813
void ff_avg_vc1_chroma_mc8_3dnow_nornd(uint8_t *dst, uint8_t *src,
1814
                                       int stride, int h, int x, int y);
1815
void ff_avg_rv40_chroma_mc8_3dnow     (uint8_t *dst, uint8_t *src,
1816
                                       int stride, int h, int x, int y);
1817

    
1818
void ff_put_h264_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
1819
                                       int stride, int h, int x, int y);
1820
void ff_put_rv40_chroma_mc4_mmx       (uint8_t *dst, uint8_t *src,
1821
                                       int stride, int h, int x, int y);
1822
void ff_avg_h264_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
1823
                                       int stride, int h, int x, int y);
1824
void ff_avg_rv40_chroma_mc4_mmx2      (uint8_t *dst, uint8_t *src,
1825
                                       int stride, int h, int x, int y);
1826
void ff_avg_h264_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
1827
                                       int stride, int h, int x, int y);
1828
void ff_avg_rv40_chroma_mc4_3dnow     (uint8_t *dst, uint8_t *src,
1829
                                       int stride, int h, int x, int y);
1830

    
1831
void ff_put_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
1832
                                       int stride, int h, int x, int y);
1833
void ff_avg_h264_chroma_mc2_mmx2      (uint8_t *dst, uint8_t *src,
1834
                                       int stride, int h, int x, int y);
1835

    
1836
void ff_put_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1837
                                       int stride, int h, int x, int y);
1838
void ff_put_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
1839
                                       int stride, int h, int x, int y);
1840
void ff_put_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
1841
                                       int stride, int h, int x, int y);
1842

    
1843
void ff_avg_h264_chroma_mc8_ssse3_rnd (uint8_t *dst, uint8_t *src,
1844
                                       int stride, int h, int x, int y);
1845
void ff_avg_vc1_chroma_mc8_ssse3_nornd(uint8_t *dst, uint8_t *src,
1846
                                       int stride, int h, int x, int y);
1847
void ff_avg_h264_chroma_mc4_ssse3     (uint8_t *dst, uint8_t *src,
1848
                                       int stride, int h, int x, int y);
1849

    
1850

    
1851
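/* The *_mc00 wrappers below handle the fullpel (0,0) motion case, where the
 * subpel filter degenerates to a plain copy (put) or average (avg), so the
 * generic pixels8/pixels16 routines can be reused directly. */
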
/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: those functions should be suppressed ASAP when all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    ff_add_pixels_clamped_mmx(block, dest, line_size);
}

static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq    %0,    %%mm0 \n\t"
            "movq    %1,    %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
            "movaps  %0,     %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps  %0,     %%xmm0 \n\t"
            "movaps  %1,     %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

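/* Branch-free form of the Vorbis square-polar coupling.  For reference, both
 * SIMD loops above should compute the same thing as this scalar sketch (it
 * mirrors the decoder's C fallback and is not compiled here): */
#if 0
static void vorbis_inverse_coupling_c_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) ang[i]  = mag[i] - ang[i];
            else            { float t = ang[i]; ang[i] = mag[i]; mag[i] += t; }
        } else {
            if (ang[i] > 0.0) ang[i] += mag[i];
            else            { float t = ang[i]; ang[i] = mag[i]; mag[i] -= t; }
        }
    }
}
#endif
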
#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
 stereo("addps         %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
   mono("addps         %%xmm2, %%xmm0 \n")\
        "movaps  %%xmm0,      (%0,%1) \n"\
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :XMM_CLOBBERS("%xmm0", "%xmm1", "%xmm2", "%xmm3", \
                      "%xmm4", "%xmm5", "%xmm6", "%xmm7",)\
         "memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps  (%3,%0), %%xmm0 \n"\
 stereo("movaps   %%xmm0, %%xmm1 \n")\
        "mulps    %%xmm4, %%xmm0 \n"\
 stereo("mulps    %%xmm5, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps   (%1),   %%xmm2 \n"\
 stereo("movaps   %%xmm2, %%xmm3 \n")\
        "mulps   (%4,%2), %%xmm2 \n"\
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps    %%xmm2, %%xmm0 \n"\
 stereo("addps    %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps   %%xmm0,     (%3,%0) \n"\
 stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[AC3_MAX_CHANNELS][2][4];
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm4 \n"
            "movss    4(%2,%0), %%xmm5 \n"
            "shufps $0, %%xmm4, %%xmm4 \n"
            "shufps $0, %%xmm5, %%xmm5 \n"
            "movaps %%xmm4,   (%1,%0,4) \n"
            "movaps %%xmm5, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

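/* MIX5 fast-paths the common 5-channel -> stereo/mono downmixes: the integer
 * compares on matrix_cmp bit-test that the unused coefficients are exactly
 * +0.0f and that paired channels share a coefficient, so only three scalar
 * coefficient loads are needed.  Everything else falls back to the generic
 * MIX_MISC loop over a splatted copy of the matrix. */
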
static void vector_fmul_3dnow(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge 1b \n\t"
        "femms  \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps    (%2,%0), %%xmm0 \n\t"
        "movaps  16(%2,%0), %%xmm1 \n\t"
        "mulps     (%3,%0), %%xmm0 \n\t"
        "mulps   16(%3,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "pfadd   (%4,%0), %%mm0 \n\t"
        "pfadd  8(%4,%0), %%mm1 \n\t"
        "movq  %%mm0,   (%1,%0) \n\t"
        "movq  %%mm1,  8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps    (%3,%0), %%xmm0 \n\t"
        "mulps  16(%3,%0), %%xmm1 \n\t"
        "addps    (%4,%0), %%xmm0 \n\t"
        "addps  16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}

#if HAVE_6REGS
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, int len){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, int len){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps       (%5,%1), %%xmm1 \n"
            "movaps       (%5,%0), %%xmm0 \n"
            "movaps       (%4,%1), %%xmm5 \n"
            "movaps       (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps        %%xmm0, %%xmm2 \n"
            "movaps        %%xmm1, %%xmm3 \n"
            "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
            "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
            "addps         %%xmm3, %%xmm2 \n"
            "subps         %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps        %%xmm1, (%2,%0) \n"
            "movaps        %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
}
#endif /* HAVE_6REGS */

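/* vector_fmul_window applies the symmetric window used for MDCT overlap-add:
 * per iteration it forms src0[len+i]*win[len+j] - src1[j]*win[len+i] for the
 * lower output half and src0[len+i]*win[len+i] + src1[j]*win[len+j]
 * (reversed) for the upper half, walking the buffers from both ends at
 * once; len is assumed to be a multiple of the vector width. */
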
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

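/* Both variants index from the end with a negative offset that counts up to
 * zero, so the loop needs no separate counter.  SSE1 has no packed
 * int->float conversion into an xmm register, hence the cvtpi2ps pairs on
 * MMX halves glued together with movlhps; SSE2's cvtdq2ps converts four
 * ints at once.  Note that cvtpi2ps reads MMX registers and thus touches
 * MMX state. */
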
static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "movss  %4, %%xmm5 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "shufps $0, %%xmm5, %%xmm5 \n"
        "1: \n\t"
        "movaps    (%2,%0), %%xmm0 \n\t" // 3/1 on intel
        "movaps  16(%2,%0), %%xmm1 \n\t"
        "movaps  32(%2,%0), %%xmm2 \n\t"
        "movaps  48(%2,%0), %%xmm3 \n\t"
        "maxps      %%xmm4, %%xmm0 \n\t"
        "maxps      %%xmm4, %%xmm1 \n\t"
        "maxps      %%xmm4, %%xmm2 \n\t"
        "maxps      %%xmm4, %%xmm3 \n\t"
        "minps      %%xmm5, %%xmm0 \n\t"
        "minps      %%xmm5, %%xmm1 \n\t"
        "minps      %%xmm5, %%xmm2 \n\t"
        "minps      %%xmm5, %%xmm3 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "movaps  %%xmm2, 32(%1,%0) \n\t"
        "movaps  %%xmm3, 48(%1,%0) \n\t"
        "sub  $64, %0 \n\t"
        "jge 1b \n\t"
        :"+&r"(i)
        :"r"(dst), "r"(src), "m"(min), "m"(max)
        :"memory"
    );
}

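/* The clamp is the usual max-then-min pair, unrolled to 16 floats (64 bytes)
 * per iteration with the byte index counting down to zero; the movaps
 * accesses imply 16-byte-aligned src/dst and len being a multiple of 16. */
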
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

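/* pf2id truncates toward zero, whereas cvtps2pi/cvtps2dq round according to
 * MXCSR (round-to-nearest-even by default), hence the bit-exactness note
 * above.  packssdw then saturates the 32-bit results to the int16 range. */
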
static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
2381
    x86_reg reglen = len;
2382
    __asm__ volatile(
2383
        "add        %0          , %0        \n\t"
2384
        "lea         (%2,%0,2)  , %2        \n\t"
2385
        "add        %0          , %1        \n\t"
2386
        "neg        %0                      \n\t"
2387
        "1:                                 \n\t"
2388
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
2389
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
2390
        "packssdw   %%xmm1      , %%xmm0    \n\t"
2391
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
2392
        "add        $16         , %0        \n\t"
2393
        " js 1b                             \n\t"
2394
        :"+r"(reglen), "+r"(dst), "+r"(src)
2395
    );
2396
}

void ff_vp3_idct_mmx(int16_t *input_data);
void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block);

void ff_vp3_idct_dc_add_mmx2(uint8_t *dest, int line_size, const DCTELEM *block);

void ff_vp3_v_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);
void ff_vp3_h_loop_filter_mmx2(uint8_t *src, int stride, int *bounding_values);

void ff_vp3_idct_sse2(int16_t *input_data);
void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block);

void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);

#if !HAVE_YASM
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
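
/* FLOAT_TO_INT16_INTERLEAVE instantiates a per-CPU interleaver: mono
 * falls through to the plain converter, stereo runs the inline-asm "body"
 * over both channels at once, 6 channels use a dedicated external
 * routine, and any other layout converts channel by channel through a
 * temporary buffer and reshuffles. */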
#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2 */\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED(16, int16_t, tmp)[len];\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}
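
/* In the MMX-register stereo bodies below, four left and four right
 * samples are converted, packssdw narrows them to words, and the
 * punpcklwd/punpckhwd pair merges them into L R L R ... order. */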
FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1:                         \n"
    "pf2id     (%2,%0), %%mm0   \n"
    "pf2id    8(%2,%0), %%mm1   \n"
    "pf2id     (%3,%0), %%mm2   \n"
    "pf2id    8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "femms                      \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "emms                       \n"
)
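
/* The SSE2 stereo body keeps all eight narrowed samples in one register:
 * after packssdw the low half of xmm0 holds L0..L3 and the high half
 * R0..R3, so movhlps plus punpcklwd suffices to interleave them. */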
FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)
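
/* 3DNow!ext shares the plain 3DNow! scalar converter; only the 6-channel
 * interleaver has a dedicated routine, so a thin wrapper dispatches. */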
static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}
float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
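
/* dsputil_init_mmx() is the x86 entry point for DSPContext setup: it
 * reads the detected CPU flags and installs the fastest applicable
 * implementation behind each function pointer, so the rest of libavcodec
 * never calls the *_mmx/*_sse symbols directly. */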
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    int mm_flags = av_get_cpu_flags();
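
    /* avctx->dsp_mask lets callers override detection: when the mask
     * carries AV_CPU_FLAG_FORCE the selected flags are forced on,
     * otherwise they are stripped from the detected set. */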
    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & AV_CPU_FLAG_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & AV_CPU_FLAG_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & AV_CPU_FLAG_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & AV_CPU_FLAG_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & AV_CPU_FLAG_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif
    if (mm_flags & AV_CPU_FLAG_MMX) {
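        /* IDCT selection: at full resolution avctx->idct_algo chooses among
         * the MMX/SSE2 IDCTs below, and each choice also records the
         * coefficient permutation it expects so decoders can reorder their
         * scan tables to match. */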
        const int idct_algo= avctx->idct_algo;

        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & AV_CPU_FLAG_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3 && HAVE_YASM){
                if(mm_flags & AV_CPU_FLAG_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & AV_CPU_FLAG_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & AV_CPU_FLAG_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = ff_put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = ff_add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if ((mm_flags & AV_CPU_FLAG_SSE) &&
            !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }
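
/* SET_HPEL_FUNCS fills one row of a half-pel table: entry [0] is the
 * full-pel copy and entries [1..3] the x, y and xy half-pel
 * interpolations; IDX 0 selects the 16-pixel-wide functions, IDX 1 the
 * 8-pixel-wide ones. */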
#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }

#if HAVE_YASM
        c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= ff_put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= ff_put_rv40_chroma_mc4_mmx;
#endif

        if (mm_flags & AV_CPU_FLAG_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER && HAVE_YASM) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }
            if (CONFIG_VP3_DECODER && HAVE_YASM) {
                c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
            }
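
/* SET_QPEL_FUNCS fills all 16 quarter-pel positions of one table row:
 * entry [x + 4*y] gets the _mc<x><y> function for quarter-pel offset
 * (x,y) within the pixel. */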
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

#if HAVE_YASM
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= ff_avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= ff_put_h264_chroma_mc2_mmx2;

            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&AV_CPU_FLAG_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif

            if (CONFIG_VC1_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & AV_CPU_FLAG_3DNOW) {
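            /* CPUs with 3DNow! but without MMX2 (e.g. AMD K6-2) take this
             * branch, which mirrors the MMX2 setup above using the 3DNow!
             * flavours of the same routines. */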
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
            }
2780

    
2781
            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
2782
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
2783
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
2784
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
2785
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
2786
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
2787

    
2788
            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
2789
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
2790
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
2791
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
2792
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
2793
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
2794

    
2795
            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
2796
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
2797
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
2798
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
2799

    
2800
#if HAVE_YASM
2801
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_3dnow_rnd;
2802
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_3dnow;
2803

    
2804
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_3dnow_nornd;
2805

    
2806
            c->avg_rv40_chroma_pixels_tab[0]= ff_avg_rv40_chroma_mc8_3dnow;
2807
            c->avg_rv40_chroma_pixels_tab[1]= ff_avg_rv40_chroma_mc4_3dnow;
2808
#endif
2809
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
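        /* The SSE2/SSSE3 blocks below override only the quarter-pel
         * positions they implement; every other entry keeps the MMX2 or
         * 3DNow! version installed above. */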
        if((mm_flags & AV_CPU_FLAG_SSE2) && !(mm_flags & AV_CPU_FLAG_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & AV_CPU_FLAG_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);
        }
#if HAVE_SSSE3
        if(mm_flags & AV_CPU_FLAG_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= ff_put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= ff_avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= ff_put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= ff_avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= ff_put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= ff_avg_h264_chroma_mc4_ssse3;
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
        }
#endif
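
        /* Float/audio DSP: the flag blocks are ordered so that later,
         * preferred implementations override pointers set by earlier ones;
         * the last applicable assignment wins (see vector_fmul_add, where
         * the 3DNow! version is installed after SSE on purpose). */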
        if(mm_flags & AV_CPU_FLAG_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & AV_CPU_FLAG_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
#if HAVE_6REGS
            c->vector_fmul_window = vector_fmul_window_3dnow2;
#endif
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & AV_CPU_FLAG_MMX2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add = vector_fmul_add_sse;
#if HAVE_6REGS
            c->vector_fmul_window = vector_fmul_window_sse;
#endif
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->vector_clipf = vector_clipf_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
#if HAVE_YASM
            c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
        }
        if(mm_flags & AV_CPU_FLAG_3DNOW)
            c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
        if(mm_flags & AV_CPU_FLAG_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
        }
        if((mm_flags & AV_CPU_FLAG_SSSE3) && !(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW)) && HAVE_YASM) // cachesplit
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}