ffmpeg / libavcodec / x86 / dsputil_mmx.c @ f2a30bd8

/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264dsp.h"
#include "libavcodec/mpegvideo.h"
#include "libavcodec/simple_idct.h"
#include "dsputil_mmx.h"
#include "vp3dsp_mmx.h"
#include "vp3dsp_sse2.h"
#include "vp6dsp_mmx.h"
#include "vp6dsp_sse2.h"
#include "idct_xvid.h"

//#undef NDEBUG
//#include <assert.h>

int mm_flags; /* multimedia extension flags */

/* pixel operations */
DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;

DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};

DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;

DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_3  ) = {0x0303030303030303ULL, 0x0303030303030303ULL};
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_4  ) = {0x0404040404040404ULL, 0x0404040404040404ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_80 ) = {0x8080808080808080ULL, 0x8080808080808080ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_F8 ) = {0xF8F8F8F8F8F8F8F8ULL, 0xF8F8F8F8F8F8F8F8ULL};
DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
DECLARE_ALIGNED(16, const xmm_reg,  ff_pb_FE ) = {0xFEFEFEFEFEFEFEFEULL, 0xFEFEFEFEFEFEFEFEULL};

DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };

#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
#define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
#else
// for shared libraries it is better to generate these constants in
// registers than to access them via memory
// pcmpeqd -> -1
#define MOVQ_BONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)

#endif

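/* For reference, how the register-only constant generation above works,
 * step by step (standard MMX instruction semantics):
 *   pcmpeqd reg,reg  -> reg = all ones             (0xFFFF in every word)
 *   psrlw  $15, reg  -> reg = 0x0001 in every word
 *   packuswb reg,reg -> reg = 0x01 in every byte   (== ff_bone)
 *   psllw   $1, reg  -> reg = 0x0002 in every word (== ff_wtwo)
 */
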
// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"

#define PAVGB_MMX(rega, regb, regr, regfe) \
    "movq " #rega ", " #regr "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pand " #regfe "," #regb "  \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"

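/* Scalar sketch of the byte-average identities the two macros above rely
 * on (illustrative only, not part of the original file).  The 0xFE mask is
 * needed because psrlq shifts across byte boundaries. */
#if 0
static inline uint8_t avg_no_rnd(uint8_t a, uint8_t b)
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1);   /* == (a + b)     >> 1 */
}
static inline uint8_t avg_rnd(uint8_t a, uint8_t b)
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1);   /* == (a + b + 1) >> 1 */
}
#endif
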
// mm6 is supposed to contain 0xfefefefefefefefe
#define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "pand " #regb ", " #regr "  \n\t"\
    "pand " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "paddb " #regb ", " #regr " \n\t"\
    "paddb " #regd ", " #regp " \n\t"

#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
    "movq " #rega ", " #regr "  \n\t"\
    "movq " #regc ", " #regp "  \n\t"\
    "por  " #regb ", " #regr "  \n\t"\
    "por  " #regd ", " #regp "  \n\t"\
    "pxor " #rega ", " #regb "  \n\t"\
    "pxor " #regc ", " #regd "  \n\t"\
    "pand %%mm6, " #regb "      \n\t"\
    "pand %%mm6, " #regd "      \n\t"\
    "psrlq $1, " #regd "        \n\t"\
    "psrlq $1, " #regb "        \n\t"\
    "psubb " #regb ", " #regr " \n\t"\
    "psubb " #regd ", " #regp " \n\t"

/***********************************/
/* MMX no rounding */
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
#define SET_RND  MOVQ_WONE
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
#define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
/***********************************/
/* MMX rounding */

#define DEF(x, y) x ## _ ## y ##_mmx
#define SET_RND  MOVQ_WTWO
#define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
#define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)

#include "dsputil_mmx_rnd_template.c"

#undef DEF
#undef SET_RND
#undef PAVGBP
#undef PAVGB
#undef OP_AVG

/***********************************/
/* 3Dnow specific */

#define DEF(x) x ## _3dnow
#define PAVGB "pavgusb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

/***********************************/
/* MMX2 specific */

#define DEF(x) x ## _mmx2

/* Introduced only in the MMX2 instruction set */
#define PAVGB "pavgb"
#define OP_AVG PAVGB

#include "dsputil_mmx_avg_template.c"

#undef DEF
#undef PAVGB
#undef OP_AVG

#define put_no_rnd_pixels16_mmx put_pixels16_mmx
#define put_no_rnd_pixels8_mmx put_pixels8_mmx
#define put_pixels16_mmx2 put_pixels16_mmx
#define put_pixels8_mmx2 put_pixels8_mmx
#define put_pixels4_mmx2 put_pixels4_mmx
#define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
#define put_pixels16_3dnow put_pixels16_mmx
#define put_pixels8_3dnow put_pixels8_mmx
#define put_pixels4_3dnow put_pixels4_mmx
#define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
#define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx

/***********************************/
/* standard MMX */

void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;

    /* read the pixels */
    p = block;
    pix = pixels;
    /* unrolled loop */
        __asm__ volatile(
                "movq   %3, %%mm0               \n\t"
                "movq   8%3, %%mm1              \n\t"
                "movq   16%3, %%mm2             \n\t"
                "movq   24%3, %%mm3             \n\t"
                "movq   32%3, %%mm4             \n\t"
                "movq   40%3, %%mm5             \n\t"
                "movq   48%3, %%mm6             \n\t"
                "movq   56%3, %%mm7             \n\t"
                "packuswb %%mm1, %%mm0          \n\t"
                "packuswb %%mm3, %%mm2          \n\t"
                "packuswb %%mm5, %%mm4          \n\t"
                "packuswb %%mm7, %%mm6          \n\t"
                "movq   %%mm0, (%0)             \n\t"
                "movq   %%mm2, (%0, %1)         \n\t"
                "movq   %%mm4, (%0, %1, 2)      \n\t"
                "movq   %%mm6, (%0, %2)         \n\t"
                ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
                :"memory");
        pix += line_size*4;
        p += 32;

    // if this were an exact copy of the code above, the compiler would
    // generate some very strange code, thus we use "r" for the block
    // pointer here instead of "m"
    __asm__ volatile(
            "movq       (%3), %%mm0             \n\t"
            "movq       8(%3), %%mm1            \n\t"
            "movq       16(%3), %%mm2           \n\t"
            "movq       24(%3), %%mm3           \n\t"
            "movq       32(%3), %%mm4           \n\t"
            "movq       40(%3), %%mm5           \n\t"
            "movq       48(%3), %%mm6           \n\t"
            "movq       56(%3), %%mm7           \n\t"
            "packuswb %%mm1, %%mm0              \n\t"
            "packuswb %%mm3, %%mm2              \n\t"
            "packuswb %%mm5, %%mm4              \n\t"
            "packuswb %%mm7, %%mm6              \n\t"
            "movq       %%mm0, (%0)             \n\t"
            "movq       %%mm2, (%0, %1)         \n\t"
            "movq       %%mm4, (%0, %1, 2)      \n\t"
            "movq       %%mm6, (%0, %2)         \n\t"
            ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
            :"memory");
}

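/* Scalar sketch of what put_pixels_clamped does (illustrative only, not
 * part of the original file): saturate each 16-bit DCT coefficient of the
 * 8x8 block to an unsigned byte, exactly like packuswb above. */
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = block[y*8 + x];
            pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;  /* clamp to 0..255 */
        }
        pixels += line_size;
    }
}
#endif
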
DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
  { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };

#define put_signed_pixels_clamped_mmx_half(off) \
            "movq    "#off"(%2), %%mm1          \n\t"\
            "movq 16+"#off"(%2), %%mm2          \n\t"\
            "movq 32+"#off"(%2), %%mm3          \n\t"\
            "movq 48+"#off"(%2), %%mm4          \n\t"\
            "packsswb  8+"#off"(%2), %%mm1      \n\t"\
            "packsswb 24+"#off"(%2), %%mm2      \n\t"\
            "packsswb 40+"#off"(%2), %%mm3      \n\t"\
            "packsswb 56+"#off"(%2), %%mm4      \n\t"\
            "paddb %%mm0, %%mm1                 \n\t"\
            "paddb %%mm0, %%mm2                 \n\t"\
            "paddb %%mm0, %%mm3                 \n\t"\
            "paddb %%mm0, %%mm4                 \n\t"\
            "movq %%mm1, (%0)                   \n\t"\
            "movq %%mm2, (%0, %3)               \n\t"\
            "movq %%mm3, (%0, %3, 2)            \n\t"\
            "movq %%mm4, (%0, %1)               \n\t"

void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    x86_reg line_skip = line_size;
    x86_reg line_skip3;

    __asm__ volatile (
            "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
            "lea (%3, %3, 2), %1                \n\t"
            put_signed_pixels_clamped_mmx_half(0)
            "lea (%0, %3, 4), %0                \n\t"
            put_signed_pixels_clamped_mmx_half(64)
            :"+&r" (pixels), "=&r" (line_skip3)
            :"r" (block), "r"(line_skip)
            :"memory");
}

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    const DCTELEM *p;
    uint8_t *pix;
    int i;

    /* read the pixels */
    p = block;
    pix = pixels;
    MOVQ_ZERO(mm7);
    i = 4;
    do {
        __asm__ volatile(
                "movq   (%2), %%mm0     \n\t"
                "movq   8(%2), %%mm1    \n\t"
                "movq   16(%2), %%mm2   \n\t"
                "movq   24(%2), %%mm3   \n\t"
                "movq   %0, %%mm4       \n\t"
                "movq   %1, %%mm6       \n\t"
                "movq   %%mm4, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm4, %%mm0    \n\t"
                "paddsw %%mm5, %%mm1    \n\t"
                "movq   %%mm6, %%mm5    \n\t"
                "punpcklbw %%mm7, %%mm6 \n\t"
                "punpckhbw %%mm7, %%mm5 \n\t"
                "paddsw %%mm6, %%mm2    \n\t"
                "paddsw %%mm5, %%mm3    \n\t"
                "packuswb %%mm1, %%mm0  \n\t"
                "packuswb %%mm3, %%mm2  \n\t"
                "movq   %%mm0, %0       \n\t"
                "movq   %%mm2, %1       \n\t"
                :"+m"(*pix), "+m"(*(pix+line_size))
                :"r"(p)
                :"memory");
        pix += line_size*2;
        p += 16;
    } while (--i);
}

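/* Scalar sketch of add_pixels_clamped (illustrative only): add the 8x8
 * residual block to the prediction and saturate to 0..255, matching the
 * paddsw/packuswb sequence above. */
#if 0
static void add_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = pixels[x] + block[y*8 + x];
            pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif
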
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movd (%1), %%mm0              \n\t"
         "movd (%1, %3), %%mm1          \n\t"
         "movd %%mm0, (%2)              \n\t"
         "movd %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "lea (%3, %3), %%"REG_a"       \n\t"
         ASMALIGN(3)
         "1:                            \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "movq (%1), %%mm0              \n\t"
         "movq 8(%1), %%mm4             \n\t"
         "movq (%1, %3), %%mm1          \n\t"
         "movq 8(%1, %3), %%mm5         \n\t"
         "movq %%mm0, (%2)              \n\t"
         "movq %%mm4, 8(%2)             \n\t"
         "movq %%mm1, (%2, %3)          \n\t"
         "movq %%mm5, 8(%2, %3)         \n\t"
         "add %%"REG_a", %1             \n\t"
         "add %%"REG_a", %2             \n\t"
         "subl $4, %0                   \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size)
         : "%"REG_a, "memory"
        );
}

static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
{
    __asm__ volatile(
         "1:                            \n\t"
         "movdqu (%1), %%xmm0           \n\t"
         "movdqu (%1,%3), %%xmm1        \n\t"
         "movdqu (%1,%3,2), %%xmm2      \n\t"
         "movdqu (%1,%4), %%xmm3        \n\t"
         "pavgb  (%2), %%xmm0           \n\t"
         "pavgb  (%2,%3), %%xmm1        \n\t"
         "pavgb  (%2,%3,2), %%xmm2      \n\t"
         "pavgb  (%2,%4), %%xmm3        \n\t"
         "movdqa %%xmm0, (%2)           \n\t"
         "movdqa %%xmm1, (%2,%3)        \n\t"
         "movdqa %%xmm2, (%2,%3,2)      \n\t"
         "movdqa %%xmm3, (%2,%4)        \n\t"
         "subl $4, %0                   \n\t"
         "lea (%1,%3,4), %1             \n\t"
         "lea (%2,%3,4), %2             \n\t"
         "jnz 1b                        \n\t"
         : "+g"(h), "+r" (pixels),  "+r" (block)
         : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
         : "memory"
        );
}

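/* For reference, the scalar semantics of the pixel ops above (pavgb is a
 * rounding average):
 *     put:  block[x] = pixels[x]
 *     avg:  block[x] = (block[x] + pixels[x] + 1) >> 1
 * Note the SSE2 versions load the source with movdqu (it may be unaligned)
 * but store with movdqa, so 'block' is assumed to be 16-byte aligned. */
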
#define CLEAR_BLOCKS(name,n) \
static void name(DCTELEM *blocks)\
{\
    __asm__ volatile(\
                "pxor %%mm7, %%mm7              \n\t"\
                "mov     %1, %%"REG_a"          \n\t"\
                "1:                             \n\t"\
                "movq %%mm7, (%0, %%"REG_a")    \n\t"\
                "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
                "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
                "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
                "add $32, %%"REG_a"             \n\t"\
                " js 1b                         \n\t"\
                : : "r" (((uint8_t *)blocks)+128*n),\
                    "i" (-128*n)\
                : "%"REG_a\
        );\
}
CLEAR_BLOCKS(clear_blocks_mmx, 6)
CLEAR_BLOCKS(clear_block_mmx, 1)

static void clear_block_sse(DCTELEM *block)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "movaps %%xmm0,    (%0) \n"
        "movaps %%xmm0,  16(%0) \n"
        "movaps %%xmm0,  32(%0) \n"
        "movaps %%xmm0,  48(%0) \n"
        "movaps %%xmm0,  64(%0) \n"
        "movaps %%xmm0,  80(%0) \n"
        "movaps %%xmm0,  96(%0) \n"
        "movaps %%xmm0, 112(%0) \n"
        :: "r"(block)
        : "memory"
    );
}

static void clear_blocks_sse(DCTELEM *blocks)
{
    __asm__ volatile(
        "xorps  %%xmm0, %%xmm0  \n"
        "mov     %1, %%"REG_a"  \n"
        "1:                     \n"
        "movaps %%xmm0,    (%0, %%"REG_a") \n"
        "movaps %%xmm0,  16(%0, %%"REG_a") \n"
        "movaps %%xmm0,  32(%0, %%"REG_a") \n"
        "movaps %%xmm0,  48(%0, %%"REG_a") \n"
        "movaps %%xmm0,  64(%0, %%"REG_a") \n"
        "movaps %%xmm0,  80(%0, %%"REG_a") \n"
        "movaps %%xmm0,  96(%0, %%"REG_a") \n"
        "movaps %%xmm0, 112(%0, %%"REG_a") \n"
        "add $128, %%"REG_a"    \n"
        " js 1b                 \n"
        : : "r" (((uint8_t *)blocks)+128*6),
            "i" (-128*6)
        : "%"REG_a
    );
}

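/* Scalar equivalent of the clear_block(s) functions above, for reference
 * (each block is 64 DCTELEMs, i.e. 128 bytes; clear_blocks zeroes six
 * consecutive blocks): */
#if 0
static void clear_blocks_ref(DCTELEM *blocks)
{
    memset(blocks, 0, sizeof(DCTELEM) * 6 * 64);
}
#endif
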
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq  (%1, %0), %%mm0          \n\t"
        "movq  (%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, (%2, %0)           \n\t"
        "movq 8(%1, %0), %%mm0          \n\t"
        "movq 8(%2, %0), %%mm1          \n\t"
        "paddb %%mm0, %%mm1             \n\t"
        "movq %%mm1, 8(%2, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %3, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(src), "r"(dst), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i+0] += src[i+0];
}

static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
    x86_reg i=0;
    __asm__ volatile(
        "jmp 2f                         \n\t"
        "1:                             \n\t"
        "movq   (%2, %0), %%mm0         \n\t"
        "movq  8(%2, %0), %%mm1         \n\t"
        "paddb  (%3, %0), %%mm0         \n\t"
        "paddb 8(%3, %0), %%mm1         \n\t"
        "movq %%mm0,  (%1, %0)          \n\t"
        "movq %%mm1, 8(%1, %0)          \n\t"
        "add $16, %0                    \n\t"
        "2:                             \n\t"
        "cmp %4, %0                     \n\t"
        " js 1b                         \n\t"
        : "+r" (i)
        : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
    );
    for(; i<w; i++)
        dst[i] = src1[i] + src2[i];
}

#if HAVE_7REGS && HAVE_TEN_OPERANDS
static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
    x86_reg w2 = -w;
    x86_reg x;
    int l = *left & 0xff;
    int tl = *left_top & 0xff;
    int t;
    __asm__ volatile(
        "mov    %7, %3 \n"
        "1: \n"
        "movzx (%3,%4), %2 \n"
        "mov    %2, %k3 \n"
        "sub   %b1, %b3 \n"
        "add   %b0, %b3 \n"
        "mov    %2, %1 \n"
        "cmp    %0, %2 \n"
        "cmovg  %0, %2 \n"
        "cmovg  %1, %0 \n"
        "cmp   %k3, %0 \n"
        "cmovg %k3, %0 \n"
        "mov    %7, %3 \n"
        "cmp    %2, %0 \n"
        "cmovl  %2, %0 \n"
        "add (%6,%4), %b0 \n"
        "mov   %b0, (%5,%4) \n"
        "inc    %4 \n"
        "jl 1b \n"
        :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
        :"r"(dst+w), "r"(diff+w), "rm"(top+w)
    );
    *left = l;
    *left_top = tl;
}
#endif

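/* Scalar sketch of the median prediction implemented above with cmov
 * (illustrative only; mid_pred() is the median-of-three helper, assumed
 * here from libavcodec/mathops.h): */
#if 0
static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    int i;
    uint8_t l = *left, lt = *left_top;
    for (i = 0; i < w; i++) {
        l  = mid_pred(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i];
        lt = top[i];
        dst[i] = l;
    }
    *left = l;
    *left_top = lt;
}
#endif
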
#define H263_LOOP_FILTER \
        "pxor %%mm7, %%mm7              \n\t"\
        "movq  %0, %%mm0                \n\t"\
        "movq  %0, %%mm1                \n\t"\
        "movq  %3, %%mm2                \n\t"\
        "movq  %3, %%mm3                \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "psubw %%mm2, %%mm0             \n\t"\
        "psubw %%mm3, %%mm1             \n\t"\
        "movq  %1, %%mm2                \n\t"\
        "movq  %1, %%mm3                \n\t"\
        "movq  %2, %%mm4                \n\t"\
        "movq  %2, %%mm5                \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "punpcklbw %%mm7, %%mm4         \n\t"\
        "punpckhbw %%mm7, %%mm5         \n\t"\
        "psubw %%mm2, %%mm4             \n\t"\
        "psubw %%mm3, %%mm5             \n\t"\
        "psllw $2, %%mm4                \n\t"\
        "psllw $2, %%mm5                \n\t"\
        "paddw %%mm0, %%mm4             \n\t"\
        "paddw %%mm1, %%mm5             \n\t"\
        "pxor %%mm6, %%mm6              \n\t"\
        "pcmpgtw %%mm4, %%mm6           \n\t"\
        "pcmpgtw %%mm5, %%mm7           \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "pxor %%mm7, %%mm5              \n\t"\
        "psubw %%mm6, %%mm4             \n\t"\
        "psubw %%mm7, %%mm5             \n\t"\
        "psrlw $3, %%mm4                \n\t"\
        "psrlw $3, %%mm5                \n\t"\
        "packuswb %%mm5, %%mm4          \n\t"\
        "packsswb %%mm7, %%mm6          \n\t"\
        "pxor %%mm7, %%mm7              \n\t"\
        "movd %4, %%mm2                 \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "punpcklbw %%mm2, %%mm2         \n\t"\
        "psubusb %%mm4, %%mm2           \n\t"\
        "movq %%mm2, %%mm3              \n\t"\
        "psubusb %%mm4, %%mm3           \n\t"\
        "psubb %%mm3, %%mm2             \n\t"\
        "movq %1, %%mm3                 \n\t"\
        "movq %2, %%mm4                 \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm3           \n\t"\
        "psubusb %%mm2, %%mm4           \n\t"\
        "pxor %%mm6, %%mm3              \n\t"\
        "pxor %%mm6, %%mm4              \n\t"\
        "paddusb %%mm2, %%mm2           \n\t"\
        "packsswb %%mm1, %%mm0          \n\t"\
        "pcmpgtb %%mm0, %%mm7           \n\t"\
        "pxor %%mm7, %%mm0              \n\t"\
        "psubb %%mm7, %%mm0             \n\t"\
        "movq %%mm0, %%mm1              \n\t"\
        "psubusb %%mm2, %%mm0           \n\t"\
        "psubb %%mm0, %%mm1             \n\t"\
        "pand %5, %%mm1                 \n\t"\
        "psrlw $2, %%mm1                \n\t"\
        "pxor %%mm7, %%mm1              \n\t"\
        "psubb %%mm7, %%mm1             \n\t"\
        "movq %0, %%mm5                 \n\t"\
        "movq %3, %%mm6                 \n\t"\
        "psubb %%mm1, %%mm5             \n\t"\
        "paddb %%mm1, %%mm6             \n\t"

static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];

    __asm__ volatile(

        H263_LOOP_FILTER

        "movq %%mm3, %1                 \n\t"
        "movq %%mm4, %2                 \n\t"
        "movq %%mm5, %0                 \n\t"
        "movq %%mm6, %3                 \n\t"
        : "+m" (*(uint64_t*)(src - 2*stride)),
          "+m" (*(uint64_t*)(src - 1*stride)),
          "+m" (*(uint64_t*)(src + 0*stride)),
          "+m" (*(uint64_t*)(src + 1*stride))
        : "g" (2*strength), "m"(ff_pb_FC)
    );
    }
}

static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        "movd  %4, %%mm0                \n\t"
        "movd  %5, %%mm1                \n\t"
        "movd  %6, %%mm2                \n\t"
        "movd  %7, %%mm3                \n\t"
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        "movd  %%mm0, %0                \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd  %%mm0, %1                \n\t"
        "movd  %%mm1, %2                \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd  %%mm1, %3                \n\t"

        : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
          "=m" (*(uint32_t*)(dst + 1*dst_stride)),
          "=m" (*(uint32_t*)(dst + 2*dst_stride)),
          "=m" (*(uint32_t*)(dst + 3*dst_stride))
        :  "m" (*(uint32_t*)(src + 0*src_stride)),
           "m" (*(uint32_t*)(src + 1*src_stride)),
           "m" (*(uint32_t*)(src + 2*src_stride)),
           "m" (*(uint32_t*)(src + 3*src_stride))
    );
}

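/* Scalar sketch of the 4x4 byte transpose above (illustrative only):
 * dst[x][y] = src[y][x] for a 4x4 block of bytes. */
#if 0
static void transpose4x4_ref(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int x, y;
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            dst[x*dst_stride + y] = src[y*src_stride + x];
}
#endif
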
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
    if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
    const int strength= ff_h263_loop_filter_strength[qscale];
    DECLARE_ALIGNED(8, uint64_t, temp)[4];
    uint8_t *btemp= (uint8_t*)temp;

    src -= 2;

    transpose4x4(btemp  , src           , 8, stride);
    transpose4x4(btemp+4, src + 4*stride, 8, stride);
    __asm__ volatile(
        H263_LOOP_FILTER // 5 3 4 6

        : "+m" (temp[0]),
          "+m" (temp[1]),
          "+m" (temp[2]),
          "+m" (temp[3])
        : "g" (2*strength), "m"(ff_pb_FC)
    );

    __asm__ volatile(
        "movq %%mm5, %%mm1              \n\t"
        "movq %%mm4, %%mm0              \n\t"
        "punpcklbw %%mm3, %%mm5         \n\t"
        "punpcklbw %%mm6, %%mm4         \n\t"
        "punpckhbw %%mm3, %%mm1         \n\t"
        "punpckhbw %%mm6, %%mm0         \n\t"
        "movq %%mm5, %%mm3              \n\t"
        "movq %%mm1, %%mm6              \n\t"
        "punpcklwd %%mm4, %%mm5         \n\t"
        "punpcklwd %%mm0, %%mm1         \n\t"
        "punpckhwd %%mm4, %%mm3         \n\t"
        "punpckhwd %%mm0, %%mm6         \n\t"
        "movd %%mm5, (%0)               \n\t"
        "punpckhdq %%mm5, %%mm5         \n\t"
        "movd %%mm5, (%0,%2)            \n\t"
        "movd %%mm3, (%0,%2,2)          \n\t"
        "punpckhdq %%mm3, %%mm3         \n\t"
        "movd %%mm3, (%0,%3)            \n\t"
        "movd %%mm1, (%1)               \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%1,%2)            \n\t"
        "movd %%mm6, (%1,%2,2)          \n\t"
        "punpckhdq %%mm6, %%mm6         \n\t"
        "movd %%mm6, (%1,%3)            \n\t"
        :: "r" (src),
           "r" (src + 4*stride),
           "r" ((x86_reg)   stride ),
           "r" ((x86_reg)(3*stride))
    );
    }
}

/* Draw the edges of width 'w' of an image of size width x height.
   This MMX version can only handle w==8 || w==16. */
static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr, *last_line;
    int i;

    last_line = buf + (height - 1) * wrap;
    /* left and right */
    ptr = buf;
    if(w==8)
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }
    else
    {
        __asm__ volatile(
                "1:                             \n\t"
                "movd (%0), %%mm0               \n\t"
                "punpcklbw %%mm0, %%mm0         \n\t"
                "punpcklwd %%mm0, %%mm0         \n\t"
                "punpckldq %%mm0, %%mm0         \n\t"
                "movq %%mm0, -8(%0)             \n\t"
                "movq %%mm0, -16(%0)            \n\t"
                "movq -8(%0, %2), %%mm1         \n\t"
                "punpckhbw %%mm1, %%mm1         \n\t"
                "punpckhwd %%mm1, %%mm1         \n\t"
                "punpckhdq %%mm1, %%mm1         \n\t"
                "movq %%mm1, (%0, %2)           \n\t"
                "movq %%mm1, 8(%0, %2)          \n\t"
                "add %1, %0                     \n\t"
                "cmp %3, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
        );
    }

    for(i=0;i<w;i+=4) {
        /* top and bottom (and hopefully also the corners) */
        ptr= buf - (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
        );
        ptr= last_line + (i + 1) * wrap - w;
        __asm__ volatile(
                "1:                             \n\t"
                "movq (%1, %0), %%mm0           \n\t"
                "movq %%mm0, (%0)               \n\t"
                "movq %%mm0, (%0, %2)           \n\t"
                "movq %%mm0, (%0, %2, 2)        \n\t"
                "movq %%mm0, (%0, %3)           \n\t"
                "add $8, %0                     \n\t"
                "cmp %4, %0                     \n\t"
                " jb 1b                         \n\t"
                : "+r" (ptr)
                : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
        );
    }
}

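/* Scalar sketch of the edge drawing above (illustrative only): replicate
 * the outermost pixels of the image into a border of width w on every
 * side.  Left/right must be filled first so the top/bottom row copies
 * also cover the corners. */
#if 0
static void draw_edges_ref(uint8_t *buf, int wrap, int width, int height, int w)
{
    int i;
    uint8_t *last_line = buf + (height - 1) * wrap;
    for (i = 0; i < height; i++) {              /* left and right columns */
        memset(buf + i*wrap - w,     buf[i*wrap],             w);
        memset(buf + i*wrap + width, buf[i*wrap + width - 1], w);
    }
    for (i = 0; i < w; i++) {                   /* top and bottom rows, corners included */
        memcpy(buf       - (i + 1)*wrap - w, buf       - w, width + 2*w);
        memcpy(last_line + (i + 1)*wrap - w, last_line - w, width + 2*w);
    }
}
#endif
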
#define PAETH(cpu, abs3)\
static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
{\
    x86_reg i = -bpp;\
    x86_reg end = w-3;\
    __asm__ volatile(\
        "pxor      %%mm7, %%mm7 \n"\
        "movd    (%1,%0), %%mm0 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "add       %4, %0 \n"\
        "1: \n"\
        "movq      %%mm1, %%mm2 \n"\
        "movd    (%2,%0), %%mm1 \n"\
        "movq      %%mm2, %%mm3 \n"\
        "punpcklbw %%mm7, %%mm1 \n"\
        "movq      %%mm2, %%mm4 \n"\
        "psubw     %%mm1, %%mm3 \n"\
        "psubw     %%mm0, %%mm4 \n"\
        "movq      %%mm3, %%mm5 \n"\
        "paddw     %%mm4, %%mm5 \n"\
        abs3\
        "movq      %%mm4, %%mm6 \n"\
        "pminsw    %%mm5, %%mm6 \n"\
        "pcmpgtw   %%mm6, %%mm3 \n"\
        "pcmpgtw   %%mm5, %%mm4 \n"\
        "movq      %%mm4, %%mm6 \n"\
        "pand      %%mm3, %%mm4 \n"\
        "pandn     %%mm3, %%mm6 \n"\
        "pandn     %%mm0, %%mm3 \n"\
        "movd    (%3,%0), %%mm0 \n"\
        "pand      %%mm1, %%mm6 \n"\
        "pand      %%mm4, %%mm2 \n"\
        "punpcklbw %%mm7, %%mm0 \n"\
        "movq      %6,    %%mm5 \n"\
        "paddw     %%mm6, %%mm0 \n"\
        "paddw     %%mm2, %%mm3 \n"\
        "paddw     %%mm3, %%mm0 \n"\
        "pand      %%mm5, %%mm0 \n"\
        "movq      %%mm0, %%mm3 \n"\
        "packuswb  %%mm3, %%mm3 \n"\
        "movd      %%mm3, (%1,%0) \n"\
        "add       %4, %0 \n"\
        "cmp       %5, %0 \n"\
        "jle 1b \n"\
        :"+r"(i)\
        :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
         "m"(ff_pw_255)\
        :"memory"\
    );\
}

#define ABS3_MMX2\
        "psubw     %%mm5, %%mm7 \n"\
        "pmaxsw    %%mm7, %%mm5 \n"\
        "pxor      %%mm6, %%mm6 \n"\
        "pxor      %%mm7, %%mm7 \n"\
        "psubw     %%mm3, %%mm6 \n"\
        "psubw     %%mm4, %%mm7 \n"\
        "pmaxsw    %%mm6, %%mm3 \n"\
        "pmaxsw    %%mm7, %%mm4 \n"\
        "pxor      %%mm7, %%mm7 \n"

#define ABS3_SSSE3\
        "pabsw     %%mm3, %%mm3 \n"\
        "pabsw     %%mm4, %%mm4 \n"\
        "pabsw     %%mm5, %%mm5 \n"

PAETH(mmx2, ABS3_MMX2)
#if HAVE_SSSE3
PAETH(ssse3, ABS3_SSSE3)
#endif

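/* Scalar sketch of the PNG Paeth predictor computed by the PAETH macro
 * above (illustrative only; a = left, b = top, c = top-left, per the PNG
 * specification): */
#if 0
static int paeth_ref(int a, int b, int c)
{
    int p  = a + b - c;                 /* initial estimate */
    int pa = abs(p - a);
    int pb = abs(p - b);
    int pc = abs(p - c);
    if (pa <= pb && pa <= pc) return a; /* nearest of a, b, c to p */
    else if (pb <= pc)        return b;
    else                      return c;
}
#endif
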
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
        "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
        "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
        "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
        "movq "#in7", " #m3 "             \n\t" /* d */\
        "movq "#in0", %%mm5               \n\t" /* D */\
        "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
        "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
        "movq "#in1", %%mm5               \n\t" /* C */\
        "movq "#in2", %%mm6               \n\t" /* B */\
        "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
        "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
        "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
        "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
        "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
        "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
        "psraw $5, %%mm5                  \n\t"\
        "packuswb %%mm5, %%mm5            \n\t"\
        OP(%%mm5, out, %%mm7, d)

#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    uint64_t temp;\
\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7                \n\t"\
        "1:                               \n\t"\
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
        "paddw %%mm3, %%mm5               \n\t" /* b */\
        "paddw %%mm2, %%mm6               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
        "paddw %%mm4, %%mm0               \n\t" /* a */\
        "paddw %%mm1, %%mm5               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
        "paddw %6, %%mm6                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        "movq %%mm0, %5                   \n\t"\
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
        \
        "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
        "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
        "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
        "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
        "paddw %%mm0, %%mm2               \n\t" /* b */\
        "paddw %%mm5, %%mm3               \n\t" /* c */\
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
        "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
        "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
        "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
        "paddw %%mm2, %%mm1               \n\t" /* a */\
        "paddw %%mm6, %%mm4               \n\t" /* d */\
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
        "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
        "paddw %6, %%mm1                  \n\t"\
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
        "psraw $5, %%mm3                  \n\t"\
        "movq %5, %%mm1                   \n\t"\
        "packuswb %%mm3, %%mm1            \n\t"\
        OP_MMX2(%%mm1, (%1),%%mm4, q)\
        /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
        \
        "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
        "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
        "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
        "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
        "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
        "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
        "paddw %%mm1, %%mm5               \n\t" /* b */\
        "paddw %%mm4, %%mm0               \n\t" /* c */\
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
        "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
        "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
        "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
        "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
        "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
        "paddw %%mm3, %%mm2               \n\t" /* d */\
        "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
        "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
        "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
        "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
        "paddw %%mm2, %%mm6               \n\t" /* a */\
        "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
        "paddw %6, %%mm0                  \n\t"\
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm0                  \n\t"\
        /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
        \
        "paddw %%mm5, %%mm3               \n\t" /* a */\
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
        "paddw %%mm4, %%mm6               \n\t" /* b */\
        "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
        "paddw %%mm1, %%mm4               \n\t" /* c */\
        "paddw %%mm2, %%mm5               \n\t" /* d */\
        "paddw %%mm6, %%mm6               \n\t" /* 2b */\
        "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
        "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
        "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
        "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
        "paddw %6, %%mm4                  \n\t"\
        "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
        "psraw $5, %%mm4                  \n\t"\
        "packuswb %%mm4, %%mm0            \n\t"\
        OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
        \
        "add %3, %0                       \n\t"\
        "add %4, %1                       \n\t"\
        "decl %2                          \n\t"\
        " jnz 1b                          \n\t"\
        : "+a"(src), "+c"(dst), "+D"(h)\
        : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
        : "memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
    int i;\
    int16_t temp[16];\
    /* quick HACK, XXX FIXME MUST be optimized */\
    for(i=0; i<h; i++)\
    {\
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
        temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
        temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
        temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
        temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
        temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
        temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
        temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
        temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
        __asm__ volatile(\
            "movq (%0), %%mm0               \n\t"\
            "movq 8(%0), %%mm1              \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
            "movq 16(%0), %%mm0             \n\t"\
            "movq 24(%0), %%mm1             \n\t"\
            "paddw %2, %%mm0                \n\t"\
            "paddw %2, %%mm1                \n\t"\
            "psraw $5, %%mm0                \n\t"\
            "psraw $5, %%mm1                \n\t"\
            "packuswb %%mm1, %%mm0          \n\t"\
            OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
            : "memory"\
        );\
        dst+=dstStride;\
        src+=srcStride;\
    }\
}\
\
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1164
    __asm__ volatile(\
1165
        "pxor %%mm7, %%mm7                \n\t"\
1166
        "1:                               \n\t"\
1167
        "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
1168
        "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
1169
        "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
1170
        "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
1171
        "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
1172
        "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
1173
        "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
1174
        "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
1175
        "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
1176
        "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
1177
        "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
1178
        "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
1179
        "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
1180
        "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
1181
        "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
1182
        "paddw %%mm3, %%mm5               \n\t" /* b */\
1183
        "paddw %%mm2, %%mm6               \n\t" /* c */\
1184
        "paddw %%mm5, %%mm5               \n\t" /* 2b */\
1185
        "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
1186
        "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
1187
        "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
1188
        "paddw %%mm4, %%mm0               \n\t" /* a */\
1189
        "paddw %%mm1, %%mm5               \n\t" /* d */\
1190
        "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1191
        "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
1192
        "paddw %5, %%mm6                  \n\t"\
1193
        "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
1194
        "psraw $5, %%mm0                  \n\t"\
1195
        /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1196
        \
1197
        "movd 5(%0), %%mm5                \n\t" /* FGHI */\
1198
        "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
1199
        "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
1200
        "paddw %%mm5, %%mm1               \n\t" /* a */\
1201
        "paddw %%mm6, %%mm2               \n\t" /* b */\
1202
        "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
1203
        "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
1204
        "paddw %%mm6, %%mm3               \n\t" /* c */\
1205
        "paddw %%mm5, %%mm4               \n\t" /* d */\
1206
        "paddw %%mm2, %%mm2               \n\t" /* 2b */\
1207
        "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
1208
        "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1209
        "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
1210
        "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
1211
        "paddw %5, %%mm1                  \n\t"\
1212
        "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
1213
        "psraw $5, %%mm3                  \n\t"\
1214
        "packuswb %%mm3, %%mm0            \n\t"\
1215
        OP_MMX2(%%mm0, (%1), %%mm4, q)\
1216
        \
1217
        "add %3, %0                       \n\t"\
1218
        "add %4, %1                       \n\t"\
1219
        "decl %2                          \n\t"\
1220
        " jnz 1b                          \n\t"\
1221
        : "+a"(src), "+c"(dst), "+d"(h)\
1222
        : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
1223
        : "memory"\
1224
    );\
1225
}\
1226
\
1227
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1228
    int i;\
1229
    int16_t temp[8];\
1230
    /* quick HACK, XXX FIXME MUST be optimized */\
1231
    for(i=0; i<h; i++)\
1232
    {\
1233
        temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1234
        temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1235
        temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1236
        temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1237
        temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1238
        temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
1239
        temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
1240
        temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
1241
        __asm__ volatile(\
1242
            "movq (%0), %%mm0           \n\t"\
1243
            "movq 8(%0), %%mm1          \n\t"\
1244
            "paddw %2, %%mm0            \n\t"\
1245
            "paddw %2, %%mm1            \n\t"\
1246
            "psraw $5, %%mm0            \n\t"\
1247
            "psraw $5, %%mm1            \n\t"\
1248
            "packuswb %%mm1, %%mm0      \n\t"\
1249
            OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1250
            :: "r"(temp), "r"(dst), "m"(ROUNDER)\
1251
            :"memory"\
1252
        );\
1253
        dst+=dstStride;\
1254
        src+=srcStride;\
1255
    }\
1256
}
1257

    
1258
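/* Reference for the qpel horizontal lowpass above: each half-pel sample is a
 * (20,-6,3,-1)-weighted sum of mirrored tap pairs a..d, rounded and shifted
 * right by 5. A minimal scalar sketch (not compiled; name is illustrative):
 */
#if 0
static inline uint8_t qpel_h_filter_ref(const uint8_t *src, int rnd)
{
    int a = src[ 0] + src[1]; /* pair straddling the half-pel position */
    int b = src[-1] + src[2];
    int c = src[-2] + src[3];
    int d = src[-3] + src[4];
    return av_clip_uint8((20*a - 6*b + 3*c - d + rnd) >> 5);
}
#endif
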
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
\
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[17*4];\
    uint64_t *temp_ptr= temp;\
    int count= 17;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "movq 8(%0), %%mm2              \n\t"\
        "movq 8(%0), %%mm3              \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "punpcklbw %%mm7, %%mm2         \n\t"\
        "punpckhbw %%mm7, %%mm3         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 17*8(%1)           \n\t"\
        "movq %%mm2, 2*17*8(%1)         \n\t"\
        "movq %%mm3, 3*17*8(%1)         \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=4;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"  \
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
        \
        "add $136, %0                   \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
        :"memory"\
    );\
}\
\
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
    uint64_t temp[9*2];\
    uint64_t *temp_ptr= temp;\
    int count= 9;\
\
    /*FIXME unroll */\
    __asm__ volatile(\
        "pxor %%mm7, %%mm7              \n\t"\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq (%0), %%mm1               \n\t"\
        "punpcklbw %%mm7, %%mm0         \n\t"\
        "punpckhbw %%mm7, %%mm1         \n\t"\
        "movq %%mm0, (%1)               \n\t"\
        "movq %%mm1, 9*8(%1)            \n\t"\
        "add $8, %1                     \n\t"\
        "add %3, %0                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
        : "+r" (src), "+r" (temp_ptr), "+r"(count)\
        : "r" ((x86_reg)srcStride)\
        : "memory"\
    );\
    \
    temp_ptr= temp;\
    count=2;\
    \
/*FIXME reorder for speed */\
    __asm__ volatile(\
        /*"pxor %%mm7, %%mm7              \n\t"*/\
        "1:                             \n\t"\
        "movq (%0), %%mm0               \n\t"\
        "movq 8(%0), %%mm1              \n\t"\
        "movq 16(%0), %%mm2             \n\t"\
        "movq 24(%0), %%mm3             \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
        \
        QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
        "add %4, %1                     \n\t"\
        QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
        QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
                \
        "add $72, %0                    \n\t"\
        "add %6, %1                     \n\t"\
        "decl %2                        \n\t"\
        " jnz 1b                        \n\t"\
         \
        : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
        : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
        : "memory"\
   );\
}\
\
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
}\
\
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[8];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
    OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
}\
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half) + 64;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
    OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
}\
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[8 + 9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[9];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
    OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
}\
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
}\
\
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
}\
\
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t temp[32];\
    uint8_t * const half= (uint8_t*)temp;\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
    OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
}\
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[16*2 + 17*2];\
    uint8_t * const halfH= ((uint8_t*)half) + 256;\
    uint8_t * const halfHV= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
    OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
}\
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}\
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    uint64_t half[17*2];\
    uint8_t * const halfH= ((uint8_t*)half);\
    put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
    OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
}

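/* The mcXY suffix encodes the quarter-pel position (X/4, Y/4) inside a pixel:
 * mc00 is a plain copy, mc20/mc02 are the pure half-pel filters, and the
 * remaining cases blend a filtered plane with a shifted source plane or with
 * a second filtered plane via the pixels*_l2 averaging helpers. For example,
 * the mc10 wrapper above is, in effect (illustrative expansion only):
 */
#if 0
put_mpeg4_qpel8_h_lowpass_mmx2(half, src, 8, stride, 8); /* half-pel in x  */
put_pixels8_l2_mmx2(dst, src, half, stride, stride, 8);  /* avg(src, half) */
#endif
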
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
#define AVG_3DNOW_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgusb " #temp ", " #a "        \n\t"\
"mov" #size " " #a ", " #b "      \n\t"
#define AVG_MMX2_OP(a,b,temp, size) \
"mov" #size " " #b ", " #temp "   \n\t"\
"pavgb " #temp ", " #a "          \n\t"\
"mov" #size " " #a ", " #b "      \n\t"

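/* PUT_OP simply stores the result; the AVG ops first average it with what is
 * already at the destination (pavgb/pavgusb compute (a+b+1)>>1 per byte),
 * which yields the avg_ motion-compensation variants. For instance,
 * AVG_MMX2_OP(%%mm0, (%1), %%mm4, q) expands to:
 *     movq  (%1), %%mm4
 *     pavgb %%mm4, %%mm0
 *     movq  %%mm0, (%1)
 */
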
QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)

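/* Each instantiation expands to a full family of put_/avg_ qpel functions for
 * one CPU flavour. The ROUNDER argument selects the rounding of the >>5 in
 * the filters: ff_pw_16 gives round-to-nearest, while ff_pw_15 gives the
 * round-down behaviour needed by the no_rnd variants. */
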
/***********************************/
/* bilinear qpel: not compliant with any spec, only for -lavdopts fast */

#define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
}
#define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
}

#define QPEL_2TAP(OPNAME, SIZE, MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
                          OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
                          OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
}\
static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
}\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\

QPEL_2TAP(put_, 16, mmx2)
QPEL_2TAP(avg_, 16, mmx2)
QPEL_2TAP(put_,  8, mmx2)
QPEL_2TAP(avg_,  8, mmx2)
QPEL_2TAP(put_, 16, 3dnow)
QPEL_2TAP(avg_, 16, 3dnow)
QPEL_2TAP(put_,  8, 3dnow)
QPEL_2TAP(avg_,  8, 3dnow)

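/* The 2tap scheme trades compliance for speed: every quarter-pel position is
 * approximated by bilinear averaging of nearby full- and half-pel samples
 * instead of the 8-tap MPEG-4 filter. The half-pel cases reuse the plain
 * pixels*_x2/_y2/_xy2 averagers, while the _l3_ helpers blend three source
 * samples selected through the S0/S1/S2 offsets above. */
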
#if 0
static void just_return(void) { return; }
#endif

static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                    int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
    const int w = 8;
    const int ix = ox>>(16+shift);
    const int iy = oy>>(16+shift);
    const int oxs = ox>>4;
    const int oys = oy>>4;
    const int dxxs = dxx>>4;
    const int dxys = dxy>>4;
    const int dyxs = dyx>>4;
    const int dyys = dyy>>4;
    const uint16_t r4[4] = {r,r,r,r};
    const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
    const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
    const uint64_t shift2 = 2*shift;
    uint8_t edge_buf[(h+1)*stride];
    int x, y;

    const int dxw = (dxx-(1<<(16+shift)))*(w-1);
    const int dyh = (dyy-(1<<(16+shift)))*(h-1);
    const int dxh = dxy*(h-1);
    const int dyw = dyx*(w-1);
    if( // non-constant fullpel offset (3% of blocks)
        ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
         (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
        // uses more than 16 bits of subpel mv (only at huge resolution)
        || (dxx|dxy|dyx|dyy)&15 )
    {
        //FIXME could still use mmx for some of the rows
        ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
        return;
    }

    src += ix + iy*stride;
    if( (unsigned)ix >= width-w ||
        (unsigned)iy >= height-h )
    {
        ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
        src = edge_buf;
    }

    __asm__ volatile(
        "movd         %0, %%mm6 \n\t"
        "pxor      %%mm7, %%mm7 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        "punpcklwd %%mm6, %%mm6 \n\t"
        :: "r"(1<<shift)
    );

    for(x=0; x<w; x+=4){
        uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
                            oxs - dxys + dxxs*(x+1),
                            oxs - dxys + dxxs*(x+2),
                            oxs - dxys + dxxs*(x+3) };
        uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
                            oys - dyys + dyxs*(x+1),
                            oys - dyys + dyxs*(x+2),
                            oys - dyys + dyxs*(x+3) };

        for(y=0; y<h; y++){
            __asm__ volatile(
                "movq   %0,  %%mm4 \n\t"
                "movq   %1,  %%mm5 \n\t"
                "paddw  %2,  %%mm4 \n\t"
                "paddw  %3,  %%mm5 \n\t"
                "movq   %%mm4, %0  \n\t"
                "movq   %%mm5, %1  \n\t"
                "psrlw  $12, %%mm4 \n\t"
                "psrlw  $12, %%mm5 \n\t"
                : "+m"(*dx4), "+m"(*dy4)
                : "m"(*dxy4), "m"(*dyy4)
            );

            __asm__ volatile(
                "movq   %%mm6, %%mm2 \n\t"
                "movq   %%mm6, %%mm1 \n\t"
                "psubw  %%mm4, %%mm2 \n\t"
                "psubw  %%mm5, %%mm1 \n\t"
                "movq   %%mm2, %%mm0 \n\t"
                "movq   %%mm4, %%mm3 \n\t"
                "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
                "pmullw %%mm5, %%mm3 \n\t" // dx*dy
                "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
                "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)

                "movd   %4,    %%mm5 \n\t"
                "movd   %3,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
                "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy

                "movd   %2,    %%mm5 \n\t"
                "movd   %1,    %%mm4 \n\t"
                "punpcklbw %%mm7, %%mm5 \n\t"
                "punpcklbw %%mm7, %%mm4 \n\t"
                "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
                "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
                "paddw  %5,    %%mm1 \n\t"
                "paddw  %%mm3, %%mm2 \n\t"
                "paddw  %%mm1, %%mm0 \n\t"
                "paddw  %%mm2, %%mm0 \n\t"

                "psrlw    %6,    %%mm0 \n\t"
                "packuswb %%mm0, %%mm0 \n\t"
                "movd     %%mm0, %0    \n\t"

                : "=m"(dst[x+y*stride])
                : "m"(src[0]), "m"(src[1]),
                  "m"(src[stride]), "m"(src[stride+1]),
                  "m"(*r4), "m"(shift2)
            );
            src += stride;
        }
        src += 4-h*stride;
    }
}

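/* Per output pixel, the inner loop above is the usual bilinear blend, with
 * s = 1<<shift and dx,dy the per-pixel fractional offsets (the top 4 bits of
 * the 16-bit accumulators kept in mm4/mm5). Scalar sketch of one sample
 * (not compiled, for reference):
 */
#if 0
dst[x] = ( src[0]        * (s-dx) * (s-dy)
         + src[1]        *    dx  * (s-dy)
         + src[stride]   * (s-dx) *    dy
         + src[stride+1] *    dx  *    dy
         + r ) >> (2*shift);
#endif
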
#define PREFETCH(name, op) \
static void name(void *mem, int stride, int h){\
    const uint8_t *p= mem;\
    do{\
        __asm__ volatile(#op" %0" :: "m"(*p));\
        p+= stride;\
    }while(--h);\
}
PREFETCH(prefetch_mmx2,  prefetcht0)
PREFETCH(prefetch_3dnow, prefetch)
#undef PREFETCH

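/* Both expansions touch one cache line per row ahead of the motion
 * compensation code: prefetcht0 (SSE) hints the data into all cache levels,
 * while the 3DNow! prefetch is the pre-SSE equivalent on AMD parts. */
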
#include "h264dsp_mmx.c"
1823
#include "rv40dsp_mmx.c"
1824

    
1825
/* CAVS specific */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels8_mmx(dst, src, stride, 8);
}
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    put_pixels16_mmx(dst, src, stride, 16);
}
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
    avg_pixels16_mmx(dst, src, stride, 16);
}

/* VC1 specific */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    put_pixels8_mmx(dst, src, stride, 8);
}
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
    avg_pixels8_mmx2(dst, src, stride, 8);
}

/* XXX: these functions should be removed ASAP once all IDCTs are
   converted */
#if CONFIG_GPL
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmx_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_mmxext_idct (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
#endif
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    put_pixels_clamped_mmx(block, dest, line_size);
}
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
{
    ff_idct_xvid_mmx2 (block);
    add_pixels_clamped_mmx(block, dest, line_size);
}

static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
{
    int i;
    __asm__ volatile("pxor %%mm7, %%mm7":);
    for(i=0; i<blocksize; i+=2) {
        __asm__ volatile(
            "movq    %0,    %%mm0 \n\t"
            "movq    %1,    %%mm1 \n\t"
            "movq    %%mm0, %%mm2 \n\t"
            "movq    %%mm1, %%mm3 \n\t"
            "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
            "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
            "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
            "pxor    %%mm2, %%mm1 \n\t"
            "movq    %%mm3, %%mm4 \n\t"
            "pand    %%mm1, %%mm3 \n\t"
            "pandn   %%mm1, %%mm4 \n\t"
            "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movq    %%mm3, %1    \n\t"
            "movq    %%mm0, %0    \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
    __asm__ volatile("femms");
}
static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
{
    int i;

    __asm__ volatile(
            "movaps  %0,     %%xmm5 \n\t"
        ::"m"(ff_pdw_80000000[0])
    );
    for(i=0; i<blocksize; i+=4) {
        __asm__ volatile(
            "movaps  %0,     %%xmm0 \n\t"
            "movaps  %1,     %%xmm1 \n\t"
            "xorps   %%xmm2, %%xmm2 \n\t"
            "xorps   %%xmm3, %%xmm3 \n\t"
            "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
            "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
            "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
            "xorps   %%xmm2, %%xmm1 \n\t"
            "movaps  %%xmm3, %%xmm4 \n\t"
            "andps   %%xmm1, %%xmm3 \n\t"
            "andnps  %%xmm1, %%xmm4 \n\t"
            "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
            "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
            "movaps  %%xmm3, %1     \n\t"
            "movaps  %%xmm0, %0     \n\t"
            :"+m"(mag[i]), "+m"(ang[i])
            ::"memory"
        );
    }
}

#define IF1(x) x
#define IF0(x)

#define MIX5(mono,stereo)\
    __asm__ volatile(\
        "movss          0(%2), %%xmm5 \n"\
        "movss          8(%2), %%xmm6 \n"\
        "movss         24(%2), %%xmm7 \n"\
        "shufps    $0, %%xmm5, %%xmm5 \n"\
        "shufps    $0, %%xmm6, %%xmm6 \n"\
        "shufps    $0, %%xmm7, %%xmm7 \n"\
        "1: \n"\
        "movaps       (%0,%1), %%xmm0 \n"\
        "movaps  0x400(%0,%1), %%xmm1 \n"\
        "movaps  0x800(%0,%1), %%xmm2 \n"\
        "movaps  0xc00(%0,%1), %%xmm3 \n"\
        "movaps 0x1000(%0,%1), %%xmm4 \n"\
        "mulps         %%xmm5, %%xmm0 \n"\
        "mulps         %%xmm6, %%xmm1 \n"\
        "mulps         %%xmm5, %%xmm2 \n"\
        "mulps         %%xmm7, %%xmm3 \n"\
        "mulps         %%xmm7, %%xmm4 \n"\
 stereo("addps         %%xmm1, %%xmm0 \n")\
        "addps         %%xmm1, %%xmm2 \n"\
        "addps         %%xmm3, %%xmm0 \n"\
        "addps         %%xmm4, %%xmm2 \n"\
   mono("addps         %%xmm2, %%xmm0 \n")\
        "movaps  %%xmm0,      (%0,%1) \n"\
 stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i)\
        :"r"(samples[0]+len), "r"(matrix)\
        :"memory"\
    );

#define MIX_MISC(stereo)\
    __asm__ volatile(\
        "1: \n"\
        "movaps  (%3,%0), %%xmm0 \n"\
 stereo("movaps   %%xmm0, %%xmm1 \n")\
        "mulps    %%xmm6, %%xmm0 \n"\
 stereo("mulps    %%xmm7, %%xmm1 \n")\
        "lea 1024(%3,%0), %1 \n"\
        "mov %5, %2 \n"\
        "2: \n"\
        "movaps   (%1),   %%xmm2 \n"\
 stereo("movaps   %%xmm2, %%xmm3 \n")\
        "mulps   (%4,%2), %%xmm2 \n"\
 stereo("mulps 16(%4,%2), %%xmm3 \n")\
        "addps    %%xmm2, %%xmm0 \n"\
 stereo("addps    %%xmm3, %%xmm1 \n")\
        "add $1024, %1 \n"\
        "add $32, %2 \n"\
        "jl 2b \n"\
        "movaps   %%xmm0,     (%3,%0) \n"\
 stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
        "add $16, %0 \n"\
        "jl 1b \n"\
        :"+&r"(i), "=&r"(j), "=&r"(k)\
        :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
        :"memory"\
    );

static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
{
    int (*matrix_cmp)[2] = (int(*)[2])matrix;
    intptr_t i,j,k;

    i = -len*sizeof(float);
    if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
        MIX5(IF0,IF1);
    } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
        MIX5(IF1,IF0);
    } else {
        DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
        j = 2*in_ch*sizeof(float);
        __asm__ volatile(
            "1: \n"
            "sub $8, %0 \n"
            "movss     (%2,%0), %%xmm6 \n"
            "movss    4(%2,%0), %%xmm7 \n"
            "shufps $0, %%xmm6, %%xmm6 \n"
            "shufps $0, %%xmm7, %%xmm7 \n"
            "movaps %%xmm6,   (%1,%0,4) \n"
            "movaps %%xmm7, 16(%1,%0,4) \n"
            "jg 1b \n"
            :"+&r"(j)
            :"r"(matrix_simd), "r"(matrix)
            :"memory"
        );
        if(out_ch == 2) {
            MIX_MISC(IF1);
        } else {
            MIX_MISC(IF0);
        }
    }
}

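/* MIX5 is the fast path for the common 5-channel layouts: the integer
 * compares on matrix_cmp check (bit-exactly) that the matrix has the usual
 * symmetric shape, leaving only three distinct coefficients. For stereo
 * output this is roughly:
 *     L' = a*L + b*C + c*Ls
 *     R' = a*R + b*C + c*Rs
 * with a = matrix[0][0], b = matrix[1][0], c = matrix[3][0] (broadcast into
 * xmm5/6/7 above). Anything else falls through to the generic MIX_MISC loop
 * over all in_ch inputs. */
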
static void vector_fmul_3dnow(float *dst, const float *src, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%1,%0), %%mm0 \n\t"
        "movq   8(%1,%0), %%mm1 \n\t"
        "pfmul   (%2,%0), %%mm0 \n\t"
        "pfmul  8(%2,%0), %%mm1 \n\t"
        "movq   %%mm0,  (%1,%0) \n\t"
        "movq   %%mm1, 8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge 1b \n\t"
        "femms  \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}
static void vector_fmul_sse(float *dst, const float *src, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps    (%1,%0), %%xmm0 \n\t"
        "movaps  16(%1,%0), %%xmm1 \n\t"
        "mulps     (%2,%0), %%xmm0 \n\t"
        "mulps   16(%2,%0), %%xmm1 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge 1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src)
        :"memory"
    );
}

static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-16;
    __asm__ volatile(
        "1: \n\t"
        "pswapd   8(%1), %%mm0 \n\t"
        "pswapd    (%1), %%mm1 \n\t"
        "pfmul  (%3,%0), %%mm0 \n\t"
        "pfmul 8(%3,%0), %%mm1 \n\t"
        "movq  %%mm0,  (%2,%0) \n\t"
        "movq  %%mm1, 8(%2,%0) \n\t"
        "add   $16, %1 \n\t"
        "sub   $16, %0 \n\t"
        "jge   1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
    __asm__ volatile("femms");
}
static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
    x86_reg i = len*4-32;
    __asm__ volatile(
        "1: \n\t"
        "movaps        16(%1), %%xmm0 \n\t"
        "movaps          (%1), %%xmm1 \n\t"
        "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
        "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
        "mulps        (%3,%0), %%xmm0 \n\t"
        "mulps      16(%3,%0), %%xmm1 \n\t"
        "movaps     %%xmm0,   (%2,%0) \n\t"
        "movaps     %%xmm1, 16(%2,%0) \n\t"
        "add    $32, %1 \n\t"
        "sub    $32, %0 \n\t"
        "jge    1b \n\t"
        :"+r"(i), "+r"(src1)
        :"r"(dst), "r"(src0)
    );
}

static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
                                  const float *src2, int len){
    x86_reg i = (len-4)*4;
    __asm__ volatile(
        "1: \n\t"
        "movq    (%2,%0), %%mm0 \n\t"
        "movq   8(%2,%0), %%mm1 \n\t"
        "pfmul   (%3,%0), %%mm0 \n\t"
        "pfmul  8(%3,%0), %%mm1 \n\t"
        "pfadd   (%4,%0), %%mm0 \n\t"
        "pfadd  8(%4,%0), %%mm1 \n\t"
        "movq  %%mm0,   (%1,%0) \n\t"
        "movq  %%mm1,  8(%1,%0) \n\t"
        "sub  $16, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
    __asm__ volatile("femms");
}
static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
                                const float *src2, int len){
    x86_reg i = (len-8)*4;
    __asm__ volatile(
        "1: \n\t"
        "movaps   (%2,%0), %%xmm0 \n\t"
        "movaps 16(%2,%0), %%xmm1 \n\t"
        "mulps    (%3,%0), %%xmm0 \n\t"
        "mulps  16(%3,%0), %%xmm1 \n\t"
        "addps    (%4,%0), %%xmm0 \n\t"
        "addps  16(%4,%0), %%xmm1 \n\t"
        "movaps %%xmm0,   (%1,%0) \n\t"
        "movaps %%xmm1, 16(%1,%0) \n\t"
        "sub  $32, %0 \n\t"
        "jge  1b \n\t"
        :"+r"(i)
        :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
        :"memory"
    );
}

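/* Scalar equivalents of the three fmul kernels above, for reference:
 *   vector_fmul:         dst[i] = dst[i]  * src[i]
 *   vector_fmul_reverse: dst[i] = src0[i] * src1[len-1-i]
 *   vector_fmul_add:     dst[i] = src0[i] * src1[i] + src2[i]
 * All of them iterate backwards from the end of the buffers so that a single
 * register serves as both index and loop counter. */
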
static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
                                      const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-8;
        __asm__ volatile(
            "1: \n"
            "pswapd  (%5,%1), %%mm1 \n"
            "movq    (%5,%0), %%mm0 \n"
            "pswapd  (%4,%1), %%mm5 \n"
            "movq    (%3,%0), %%mm4 \n"
            "movq      %%mm0, %%mm2 \n"
            "movq      %%mm1, %%mm3 \n"
            "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
            "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
            "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
            "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
            "pfadd     %%mm3, %%mm2 \n"
            "pfsub     %%mm0, %%mm1 \n"
            "pswapd    %%mm2, %%mm2 \n"
            "movq      %%mm1, (%2,%0) \n"
            "movq      %%mm2, (%2,%1) \n"
            "sub $8, %1 \n"
            "add $8, %0 \n"
            "jl 1b \n"
            "femms \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
                                   const float *win, float add_bias, int len){
#if HAVE_6REGS
    if(add_bias == 0){
        x86_reg i = -len*4;
        x86_reg j = len*4-16;
        __asm__ volatile(
            "1: \n"
            "movaps       (%5,%1), %%xmm1 \n"
            "movaps       (%5,%0), %%xmm0 \n"
            "movaps       (%4,%1), %%xmm5 \n"
            "movaps       (%3,%0), %%xmm4 \n"
            "shufps $0x1b, %%xmm1, %%xmm1 \n"
            "shufps $0x1b, %%xmm5, %%xmm5 \n"
            "movaps        %%xmm0, %%xmm2 \n"
            "movaps        %%xmm1, %%xmm3 \n"
            "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
            "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
            "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
            "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
            "addps         %%xmm3, %%xmm2 \n"
            "subps         %%xmm0, %%xmm1 \n"
            "shufps $0x1b, %%xmm2, %%xmm2 \n"
            "movaps        %%xmm1, (%2,%0) \n"
            "movaps        %%xmm2, (%2,%1) \n"
            "sub $16, %1 \n"
            "add $16, %0 \n"
            "jl 1b \n"
            :"+r"(i), "+r"(j)
            :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
        );
    }else
#endif
        ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
}

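/* Both window kernels compute the MDCT overlap-add butterfly from both ends
 * of the buffers at once. Scalar sketch of the zero-bias path (mirrors
 * ff_vector_fmul_window_c; not compiled):
 */
#if 0
for (i = -len, j = len-1; i < 0; i++, j--) {
    float s0 = src0[len+i], s1 = src1[j];
    float wi = win[len+i],  wj = win[len+j];
    dst[len+i] = s0*wj - s1*wi;
    dst[len+j] = s0*wi + s1*wj;
}
#endif
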
static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtpi2ps   (%2,%0), %%xmm0 \n"
        "cvtpi2ps  8(%2,%0), %%xmm1 \n"
        "cvtpi2ps 16(%2,%0), %%xmm2 \n"
        "cvtpi2ps 24(%2,%0), %%xmm3 \n"
        "movlhps  %%xmm1,    %%xmm0 \n"
        "movlhps  %%xmm3,    %%xmm2 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm2 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm2, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
{
    x86_reg i = -4*len;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "1: \n"
        "cvtdq2ps   (%2,%0), %%xmm0 \n"
        "cvtdq2ps 16(%2,%0), %%xmm1 \n"
        "mulps    %%xmm4,    %%xmm0 \n"
        "mulps    %%xmm4,    %%xmm1 \n"
        "movaps   %%xmm0,   (%1,%0) \n"
        "movaps   %%xmm1, 16(%1,%0) \n"
        "add $32, %0 \n"
        "jl 1b \n"
        :"+r"(i)
        :"r"(dst+len), "r"(src+len), "m"(mul)
    );
}

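/* Both variants compute dst[i] = src[i] * mul. SSE1 has no packed int->float
 * conversion in the XMM domain, so the _sse version converts via cvtpi2ps
 * (two MMX-width halves merged with movlhps); SSE2 converts 4 ints at once
 * with cvtdq2ps. */
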
static void vector_clipf_sse(float *dst, const float *src, float min, float max,
                             int len)
{
    x86_reg i = (len-16)*4;
    __asm__ volatile(
        "movss  %3, %%xmm4 \n"
        "movss  %4, %%xmm5 \n"
        "shufps $0, %%xmm4, %%xmm4 \n"
        "shufps $0, %%xmm5, %%xmm5 \n"
        "1: \n\t"
        "movaps    (%2,%0), %%xmm0 \n\t" // 3/1 on intel
        "movaps  16(%2,%0), %%xmm1 \n\t"
        "movaps  32(%2,%0), %%xmm2 \n\t"
        "movaps  48(%2,%0), %%xmm3 \n\t"
        "maxps      %%xmm4, %%xmm0 \n\t"
        "maxps      %%xmm4, %%xmm1 \n\t"
        "maxps      %%xmm4, %%xmm2 \n\t"
        "maxps      %%xmm4, %%xmm3 \n\t"
        "minps      %%xmm5, %%xmm0 \n\t"
        "minps      %%xmm5, %%xmm1 \n\t"
        "minps      %%xmm5, %%xmm2 \n\t"
        "minps      %%xmm5, %%xmm3 \n\t"
        "movaps  %%xmm0,   (%1,%0) \n\t"
        "movaps  %%xmm1, 16(%1,%0) \n\t"
        "movaps  %%xmm2, 32(%1,%0) \n\t"
        "movaps  %%xmm3, 48(%1,%0) \n\t"
        "sub  $64, %0 \n\t"
        "jge 1b \n\t"
        :"+&r"(i)
        :"r"(dst), "r"(src), "m"(min), "m"(max)
        :"memory"
    );
}

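/* Equivalent to dst[i] = FFMIN(FFMAX(src[i], min), max) for every element;
 * min/max are broadcast once and the loop clamps 16 floats per iteration. */
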
static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    // not bit-exact: pf2id uses different rounding than C and SSE
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "pf2id       (%2,%0,2)  , %%mm0     \n\t"
        "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
        "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
        "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "femms                              \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}
static void float_to_int16_sse(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
        "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
        "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
        "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
        "packssdw   %%mm1       , %%mm0     \n\t"
        "packssdw   %%mm3       , %%mm2     \n\t"
        "movq       %%mm0       ,  (%1,%0)  \n\t"
        "movq       %%mm2       , 8(%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        "emms                               \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
    x86_reg reglen = len;
    __asm__ volatile(
        "add        %0          , %0        \n\t"
        "lea         (%2,%0,2)  , %2        \n\t"
        "add        %0          , %1        \n\t"
        "neg        %0                      \n\t"
        "1:                                 \n\t"
        "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
        "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
        "packssdw   %%xmm1      , %%xmm0    \n\t"
        "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
        "add        $16         , %0        \n\t"
        " js 1b                             \n\t"
        :"+r"(reglen), "+r"(dst), "+r"(src)
    );
}

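/* All three converters share one addressing idiom: reglen is doubled to the
 * output size in bytes ("add %0,%0"), dst and src are biased to the ends of
 * their buffers, and the index goes from negative up to zero, so a single
 * register is both the byte offset and the loop-termination test ("js 1b"). */
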
void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
int32_t ff_scalarproduct_int16_mmx2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_int16_sse2(const int16_t *v1, const int16_t *v2, int order, int shift);
int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, const int16_t *v2, const int16_t *v3, int order, int mul);
void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);

#if HAVE_YASM && ARCH_X86_32
void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
{
    ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
    ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
}
#elif !HAVE_YASM
#define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
#define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
#define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
#endif
#define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse

#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
    DECLARE_ALIGNED(16, int16_t, tmp)[len];\
    int i,j,c;\
    for(c=0; c<channels; c++){\
        float_to_int16_##cpu(tmp, src[c], len);\
        for(i=0, j=c; i<len; i++, j+=channels)\
            dst[j] = tmp[i];\
    }\
}\
\
static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
    if(channels==1)\
        float_to_int16_##cpu(dst, src[0], len);\
    else if(channels==2){\
        x86_reg reglen = len; \
        const float *src0 = src[0];\
        const float *src1 = src[1];\
        __asm__ volatile(\
            "shl $2, %0 \n"\
            "add %0, %1 \n"\
            "add %0, %2 \n"\
            "add %0, %3 \n"\
            "neg %0 \n"\
            body\
            :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
        );\
    }else if(channels==6){\
        ff_float_to_int16_interleave6_##cpu(dst, src, len);\
    }else\
        float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
}

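/* The stereo loop bodies below use a negated byte index: each pointer is
 * advanced len*4 bytes to its end (len floats per source, 2*len int16 for
 * dst) and %0 counts up from -len*4 to 0, so a single "add $16 / js 1b"
 * pair serves as both increment and exit test. */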
FLOAT_TO_INT16_INTERLEAVE(3dnow,
    "1:                         \n"
    "pf2id     (%2,%0), %%mm0   \n"
    "pf2id    8(%2,%0), %%mm1   \n"
    "pf2id     (%3,%0), %%mm2   \n"
    "pf2id    8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "femms                      \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse,
    "1:                         \n"
    "cvtps2pi  (%2,%0), %%mm0   \n"
    "cvtps2pi 8(%2,%0), %%mm1   \n"
    "cvtps2pi  (%3,%0), %%mm2   \n"
    "cvtps2pi 8(%3,%0), %%mm3   \n"
    "packssdw    %%mm1, %%mm0   \n"
    "packssdw    %%mm3, %%mm2   \n"
    "movq        %%mm0, %%mm1   \n"
    "punpcklwd   %%mm2, %%mm0   \n"
    "punpckhwd   %%mm2, %%mm1   \n"
    "movq        %%mm0,  (%1,%0)\n"
    "movq        %%mm1, 8(%1,%0)\n"
    "add $16, %0                \n"
    "js 1b                      \n"
    "emms                       \n"
)

FLOAT_TO_INT16_INTERLEAVE(sse2,
    "1:                         \n"
    "cvtps2dq  (%2,%0), %%xmm0  \n"
    "cvtps2dq  (%3,%0), %%xmm1  \n"
    "packssdw   %%xmm1, %%xmm0  \n"
    "movhlps    %%xmm0, %%xmm1  \n"
    "punpcklwd  %%xmm1, %%xmm0  \n"
    "movdqa     %%xmm0, (%1,%0) \n"
    "add $16, %0                \n"
    "js 1b                      \n"
)
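/* Note: the sse2 body stays entirely in xmm registers, so it needs no
 * trailing emms/femms, unlike the mmx-register-based 3dnow/sse variants. */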

static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
    if(channels==6)
        ff_float_to_int16_interleave6_3dn2(dst, src, len);
    else
        float_to_int16_interleave_3dnow(dst, src, len, channels);
}

float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);

void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
{
    mm_flags = mm_support();

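    /* dsp_mask lets the caller override CPU detection: with FF_MM_FORCE set
     * the listed flags are force-enabled, otherwise they are masked out. */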
    if (avctx->dsp_mask) {
        if (avctx->dsp_mask & FF_MM_FORCE)
            mm_flags |= (avctx->dsp_mask & 0xffff);
        else
            mm_flags &= ~(avctx->dsp_mask & 0xffff);
    }

#if 0
    av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
    if (mm_flags & FF_MM_MMX)
        av_log(avctx, AV_LOG_INFO, " mmx");
    if (mm_flags & FF_MM_MMX2)
        av_log(avctx, AV_LOG_INFO, " mmx2");
    if (mm_flags & FF_MM_3DNOW)
        av_log(avctx, AV_LOG_INFO, " 3dnow");
    if (mm_flags & FF_MM_SSE)
        av_log(avctx, AV_LOG_INFO, " sse");
    if (mm_flags & FF_MM_SSE2)
        av_log(avctx, AV_LOG_INFO, " sse2");
    av_log(avctx, AV_LOG_INFO, "\n");
#endif

    if (mm_flags & FF_MM_MMX) {
        const int idct_algo= avctx->idct_algo;

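        /* IDCT selection; skipped for lowres decoding, which keeps the
         * default C IDCT set up by the generic init. */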
        if(avctx->lowres==0){
            if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
                c->idct_put= ff_simple_idct_put_mmx;
                c->idct_add= ff_simple_idct_add_mmx;
                c->idct    = ff_simple_idct_mmx;
                c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
#if CONFIG_GPL
            }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
                if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_libmpeg2mmx2_idct_put;
                    c->idct_add= ff_libmpeg2mmx2_idct_add;
                    c->idct    = ff_mmxext_idct;
                }else{
                    c->idct_put= ff_libmpeg2mmx_idct_put;
                    c->idct_add= ff_libmpeg2mmx_idct_add;
                    c->idct    = ff_mmx_idct;
                }
                c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
#endif
            }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
                     idct_algo==FF_IDCT_VP3){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_vp3_idct_put_sse2;
                    c->idct_add= ff_vp3_idct_add_sse2;
                    c->idct    = ff_vp3_idct_sse2;
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
                }else{
                    c->idct_put= ff_vp3_idct_put_mmx;
                    c->idct_add= ff_vp3_idct_add_mmx;
                    c->idct    = ff_vp3_idct_mmx;
                    c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
                }
            }else if(idct_algo==FF_IDCT_CAVS){
                    c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
            }else if(idct_algo==FF_IDCT_XVIDMMX){
                if(mm_flags & FF_MM_SSE2){
                    c->idct_put= ff_idct_xvid_sse2_put;
                    c->idct_add= ff_idct_xvid_sse2_add;
                    c->idct    = ff_idct_xvid_sse2;
                    c->idct_permutation_type= FF_SSE2_IDCT_PERM;
                }else if(mm_flags & FF_MM_MMX2){
                    c->idct_put= ff_idct_xvid_mmx2_put;
                    c->idct_add= ff_idct_xvid_mmx2_add;
                    c->idct    = ff_idct_xvid_mmx2;
                }else{
                    c->idct_put= ff_idct_xvid_mmx_put;
                    c->idct_add= ff_idct_xvid_mmx_add;
                    c->idct    = ff_idct_xvid_mmx;
                }
            }
        }

        c->put_pixels_clamped = put_pixels_clamped_mmx;
        c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
        c->add_pixels_clamped = add_pixels_clamped_mmx;
        c->clear_block  = clear_block_mmx;
        c->clear_blocks = clear_blocks_mmx;
        if ((mm_flags & FF_MM_SSE) &&
            !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
            /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
            c->clear_block  = clear_block_sse;
            c->clear_blocks = clear_blocks_sse;
        }

#define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
        c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
        c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
        c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU

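        /* Half-pel motion-compensation tables: index [0]/[1] selects 16- or
         * 8-pixel width, [0..3] the no-shift/x2/y2/xy2 averaging variant. */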
        SET_HPEL_FUNCS(put, 0, 16, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(avg, 0, 16, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
        SET_HPEL_FUNCS(put, 1, 8, mmx);
        SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
        SET_HPEL_FUNCS(avg, 1, 8, mmx);
        SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);

        c->gmc= gmc_mmx;

        c->add_bytes= add_bytes_mmx;
        c->add_bytes_l2= add_bytes_l2_mmx;

        c->draw_edges = draw_edges_mmx;

        if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
            c->h263_v_loop_filter= h263_v_loop_filter_mmx;
            c->h263_h_loop_filter= h263_h_loop_filter_mmx;
        }
        c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
        c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
        c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;

        c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
        c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;

        if (CONFIG_VP6_DECODER) {
            c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
        }

        if (mm_flags & FF_MM_MMX2) {
            c->prefetch = prefetch_mmx2;

            c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
            c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;

            c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;

            c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
            c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;

            c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;

                if (CONFIG_VP3_DECODER) {
                    c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
                    c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
                }
            }
            if (CONFIG_VP3_DECODER) {
                c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
            }

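/* Quarter-pel tables: the 16 entries map the (x,y) quarter-pel position to
 * the _mcXY_ functions via index x + y*4. */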
#define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU

            SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;

            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
            c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
            c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;

#if HAVE_YASM
            c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
#endif
#if HAVE_7REGS && HAVE_TEN_OPERANDS
            if( mm_flags&FF_MM_3DNOW )
                c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
#endif
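            /* The cmov-based median predictor needs 7 general-purpose
             * registers and 10 asm operands, hence the extra guards; it is
             * only picked on 3DNow-capable (i.e. AMD) CPUs, where it is
             * presumably faster than the mmx2 version. */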

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_mmx2(c, avctx);

            if (CONFIG_VC1_DECODER)
                ff_vc1dsp_init_mmx(c, avctx);

            c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
        } else if (mm_flags & FF_MM_3DNOW) {
            c->prefetch = prefetch_3dnow;

            c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
            c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;

            c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
            c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
            c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;

            c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
            c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;

            c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
            c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
            c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;

            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
                c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
                c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
                c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
            }

            if (CONFIG_VP3_DECODER
                && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
                c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
                c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
            }

            SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);

            SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);

            SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
            SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);

            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;

            c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
            c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;

            if (CONFIG_CAVS_DECODER)
                ff_cavsdsp_init_3dnow(c, avctx);
        }

#define H264_QPEL_FUNCS(x, y, CPU)\
            c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
            c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
            c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
        if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
            // these functions are slower than mmx on AMD, but faster on Intel
            c->put_pixels_tab[0][0] = put_pixels16_sse2;
            c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
            H264_QPEL_FUNCS(0, 0, sse2);
        }
        if(mm_flags & FF_MM_SSE2){
            H264_QPEL_FUNCS(0, 1, sse2);
            H264_QPEL_FUNCS(0, 2, sse2);
            H264_QPEL_FUNCS(0, 3, sse2);
            H264_QPEL_FUNCS(1, 1, sse2);
            H264_QPEL_FUNCS(1, 2, sse2);
            H264_QPEL_FUNCS(1, 3, sse2);
            H264_QPEL_FUNCS(2, 1, sse2);
            H264_QPEL_FUNCS(2, 2, sse2);
            H264_QPEL_FUNCS(2, 3, sse2);
            H264_QPEL_FUNCS(3, 1, sse2);
            H264_QPEL_FUNCS(3, 2, sse2);
            H264_QPEL_FUNCS(3, 3, sse2);

            if (CONFIG_VP6_DECODER) {
                c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
            }
        }
#if HAVE_SSSE3
        if(mm_flags & FF_MM_SSSE3){
            H264_QPEL_FUNCS(1, 0, ssse3);
            H264_QPEL_FUNCS(1, 1, ssse3);
            H264_QPEL_FUNCS(1, 2, ssse3);
            H264_QPEL_FUNCS(1, 3, ssse3);
            H264_QPEL_FUNCS(2, 0, ssse3);
            H264_QPEL_FUNCS(2, 1, ssse3);
            H264_QPEL_FUNCS(2, 2, ssse3);
            H264_QPEL_FUNCS(2, 3, ssse3);
            H264_QPEL_FUNCS(3, 0, ssse3);
            H264_QPEL_FUNCS(3, 1, ssse3);
            H264_QPEL_FUNCS(3, 2, ssse3);
            H264_QPEL_FUNCS(3, 3, ssse3);
            c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
            c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
            c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
            c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
            c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
            c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
            c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
#if HAVE_YASM
            c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
            if (mm_flags & FF_MM_SSE4) // not really sse4, just slow on Conroe
                c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
#endif
        }
#endif

        if(mm_flags & FF_MM_3DNOW){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
            c->vector_fmul = vector_fmul_3dnow;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16 = float_to_int16_3dnow;
                c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
            }
        }
        if(mm_flags & FF_MM_3DNOWEXT){
            c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
            c->vector_fmul_window = vector_fmul_window_3dnow2;
            if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
                c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
            }
        }
        if(mm_flags & FF_MM_MMX2){
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
#endif
        }
        if(mm_flags & FF_MM_SSE){
            c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
            c->ac3_downmix = ac3_downmix_sse;
            c->vector_fmul = vector_fmul_sse;
            c->vector_fmul_reverse = vector_fmul_reverse_sse;
            c->vector_fmul_add = vector_fmul_add_sse;
            c->vector_fmul_window = vector_fmul_window_sse;
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
            c->vector_clipf = vector_clipf_sse;
            c->float_to_int16 = float_to_int16_sse;
            c->float_to_int16_interleave = float_to_int16_interleave_sse;
#if HAVE_YASM
            c->scalarproduct_float = ff_scalarproduct_float_sse;
#endif
        }
        if(mm_flags & FF_MM_3DNOW)
            c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
        if(mm_flags & FF_MM_SSE2){
            c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
            c->float_to_int16 = float_to_int16_sse2;
            c->float_to_int16_interleave = float_to_int16_interleave_sse2;
#if HAVE_YASM
            c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
#endif
        }
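        /* "cachesplit": the ssse3 version is presumably only a win on CPUs
         * where cacheline-splitting unaligned loads are expensive; chips with
         * SSE4.2 or 3DNow (newer Intel and AMD) keep the sse2 version. */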
        if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit
            c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
    }

    if (CONFIG_ENCODERS)
        dsputilenc_init_mmx(c, avctx);

#if 0
    // for speed testing
    get_pixels = just_return;
    put_pixels_clamped = just_return;
    add_pixels_clamped = just_return;

    pix_abs16x16 = just_return;
    pix_abs16x16_x2 = just_return;
    pix_abs16x16_y2 = just_return;
    pix_abs16x16_xy2 = just_return;

    put_pixels_tab[0] = just_return;
    put_pixels_tab[1] = just_return;
    put_pixels_tab[2] = just_return;
    put_pixels_tab[3] = just_return;

    put_no_rnd_pixels_tab[0] = just_return;
    put_no_rnd_pixels_tab[1] = just_return;
    put_no_rnd_pixels_tab[2] = just_return;
    put_no_rnd_pixels_tab[3] = just_return;

    avg_pixels_tab[0] = just_return;
    avg_pixels_tab[1] = just_return;
    avg_pixels_tab[2] = just_return;
    avg_pixels_tab[3] = just_return;

    avg_no_rnd_pixels_tab[0] = just_return;
    avg_no_rnd_pixels_tab[1] = just_return;
    avg_no_rnd_pixels_tab[2] = just_return;
    avg_no_rnd_pixels_tab[3] = just_return;

    //av_fdct = just_return;
    //ff_idct = just_return;
#endif
}

#if CONFIG_H264DSP
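/* Same pattern as dsputil_init_mmx(), but for the H.264-specific IDCT,
 * deblocking, and (bi)weight function pointers. */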
void ff_h264dsp_init_x86(H264DSPContext *c)
{
    mm_flags = mm_support();

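    /* Plain MMX has no dedicated dc-only IDCT add, so below the dc_add
     * pointers alias the full add; mmx2 installs dedicated versions later. */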
    if (mm_flags & FF_MM_MMX) {
        c->h264_idct_dc_add=
        c->h264_idct_add= ff_h264_idct_add_mmx;
        c->h264_idct8_dc_add=
        c->h264_idct8_add= ff_h264_idct8_add_mmx;

        c->h264_idct_add16     = ff_h264_idct_add16_mmx;
        c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
        c->h264_idct_add8      = ff_h264_idct_add8_mmx;
        c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;

        if (mm_flags & FF_MM_MMX2) {
            c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
            c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
            c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
            c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
            c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
            c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;

            c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
            c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
            c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
            c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
            c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
            c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
            c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;

            c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
            c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
            c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
            c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
            c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
            c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
            c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
            c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;

            c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
            c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
            c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
            c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
            c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
            c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
            c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
            c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
        }
        if(mm_flags & FF_MM_SSE2){
            c->h264_idct8_add = ff_h264_idct8_add_sse2;
            c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
        }

#if CONFIG_GPL && HAVE_YASM
        if (mm_flags & FF_MM_MMX2){
#if ARCH_X86_32
            c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
            c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
#endif
            if( mm_flags&FF_MM_SSE2 ){
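                /* The guard below appears to work around a code-generation
                 * problem with ICC versions up to 11.1 on x86_32. */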
#if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110
                c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
                c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
                c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
                c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
#endif
                c->h264_idct_add16 = ff_h264_idct_add16_sse2;
                c->h264_idct_add8  = ff_h264_idct_add8_sse2;
                c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
            }
        }
#endif
    }
}
#endif /* CONFIG_H264DSP */